input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from datetime import datetime
from itertools import groupby
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils.translation import gettext as _
from pydantic import BaseModel, parse_obj_as
from rest_framework import serializers
from backend.apps.group.models import Group, GroupAuthorizeLock
from backend.apps.policy.models import Policy as PolicyModel
from backend.apps.role.models import Role, RoleRelatedObject
from backend.apps.template.models import PermTemplatePolicyAuthorized, PermTemplatePreUpdateLock
from backend.biz.policy import PolicyBean, PolicyBeanList, PolicyOperationBiz
from backend.biz.resource import ResourceBiz
from backend.biz.role import RoleAuthorizationScopeChecker, RoleSubjectScopeChecker
from backend.biz.template import TemplateBiz, TemplateCheckBiz
from backend.common.error_codes import APIException, CodeException, error_codes
from backend.common.time import PERMANENT_SECONDS, expired_at_display
from backend.long_task.constants import TaskType
from backend.long_task.models import TaskDetail
from backend.long_task.tasks import TaskFactory
from backend.service.constants import RoleRelatedObjectType, SubjectType
from backend.service.engine import EngineService
from backend.service.group import GroupCreate, GroupMemberExpiredAt, GroupService, SubjectGroup
from backend.service.group_saas_attribute import GroupAttributeService
from backend.service.models import Policy, Subject
from backend.service.policy.query import PolicyQueryService
from backend.service.role import RoleService, UserRole
from backend.service.system import SystemService
from backend.service.template import TemplateService
from backend.util.time import utc_string_to_local
from backend.util.uuid import gen_uuid
from .action import ActionCheckBiz, ActionForCheck
from .subject import SubjectInfoList
class GroupSystemCounterBean(BaseModel):
    """Per-system permission counts for a user group: how many custom
    policies and how many permission templates the group holds in the system."""
    id: str
    name: str
    name_en: str
    custom_policy_count: int = 0
    template_count: int = 0
class SubjectGroupBean(BaseModel):
    """A group that a subject belongs to, with membership-expiry display info."""
    id: int
    name: str
    description: str
    expired_at: int
    expired_at_display: str
    created_time: datetime
    # Information inherited from a department (set when the membership
    # comes via a department rather than directly)
    department_id: int = 0
    department_name: str = ""
class GroupMemberBean(BaseModel):
    """One member of a group (user or department), with display info attached."""
    type: str
    id: str
    name: str = ""
    full_name: str = ""
    member_count: int = 0
    expired_at: int
    expired_at_display: str
    created_time: datetime
    # Information inherited from a department (set when the membership
    # comes via a department rather than directly)
    department_id: int = 0
    department_name: str = ""
class GroupRoleDict(BaseModel):
    """Lookup table mapping group id -> the role associated with the group."""
    data: Dict[int, UserRole]
    def get(self, group_id: int) -> Optional[UserRole]:
        """Return the role for ``group_id``, or None if there is none."""
        return self.data.get(group_id)
class GroupMemberExpiredAtBean(GroupMemberExpiredAt):
    """Biz-layer alias of the service model, so callers depend on this layer."""
    pass
class GroupCreateBean(GroupCreate):
    # Whether the created group is read-only; defaults to True
    # (used by the management API batch-create path)
    readonly: bool = True
class GroupTemplateGrantBean(BaseModel):
    """One grant unit for a group; template_id == 0 means the policies are
    custom permissions for system_id rather than a permission template."""
    system_id: str
    template_id: int
    policies: List[PolicyBean]
class GroupBiz:
    """Business logic for user groups: creation, authorization, members."""
    policy_query_svc = PolicyQueryService()
    template_svc = TemplateService()
    system_svc = SystemService()
    group_svc = GroupService()
    group_attribute_svc = GroupAttributeService()
    engine_svc = EngineService()
    role_svc = RoleService()
    # TODO: why is this a biz-layer dependency (rather than a service)?
    action_check_biz = ActionCheckBiz()
    policy_operation_biz = PolicyOperationBiz()
    template_biz = TemplateBiz()
    resource_biz = ResourceBiz()
    template_check_biz = TemplateCheckBiz()
    # Pass-through methods: plain functions lifted from the service classes'
    # __dict__ so they become methods of GroupBiz itself.
    # NOTE(review): when called, `self` is the GroupBiz instance, not a
    # service instance — this relies on these service methods not reading
    # service-local state; confirm against the service implementations.
    add_members = GroupService.__dict__["add_members"]
    remove_members = GroupService.__dict__["remove_members"]
    update = GroupService.__dict__["update"]
    get_member_count_before_expired_at = GroupService.__dict__["get_member_count_before_expired_at"]
    list_exist_groups_before_expired_at = GroupService.__dict__["list_exist_groups_before_expired_at"]
    batch_get_attributes = GroupAttributeService.__dict__["batch_get_attributes"]
def create_and_add_members(
    self, role_id: int, name: str, description: str, creator: str, subjects: List[Subject], expired_at: int
) -> Group:
    """
    Create a user group under a role and optionally add initial members.

    The group creation, the role<->group relation, and the member addition
    are committed atomically.
    """
    with transaction.atomic():
        group = self.group_svc.create(name, description, creator)
        RoleRelatedObject.objects.create_group_relation(role_id, group.id)
        if subjects:
            self.group_svc.add_members(group.id, subjects, expired_at)
    return group
def batch_create(self, role_id: int, infos: List[GroupCreateBean], creator: str) -> List[Group]:
    """
    Batch-create user groups (used by the management API).
    """
    with transaction.atomic():
        groups = self.group_svc.batch_create(parse_obj_as(List[GroupCreate], infos), creator)
        # Persist the SaaS-side attributes (currently only `readonly`) per group
        group_attrs = {group.id: {"readonly": info.readonly} for group, info in zip(groups, infos)}
        self.group_attribute_svc.batch_set_attributes(group_attrs)
        # Relate every new group to the owning role
        RoleRelatedObject.objects.batch_create_group_relation(role_id, [group.id for group in groups])
    return groups
def list_system_counter(self, group_id: int) -> List[GroupSystemCounterBean]:
    """
    For every system the group has permissions in, report the number of
    custom policies and the number of authorized permission templates.
    """
    subject = Subject(type=SubjectType.GROUP.value, id=str(group_id))
    custom_counts = {s.id: s.count for s in self.policy_query_svc.list_system_counter_by_subject(subject)}
    template_counts = {s.id: s.count for s in self.template_svc.list_system_counter_by_subject(subject)}
    # Nothing authorized at all: skip the systems query entirely
    if not custom_counts and not template_counts:
        return []
    beans: List[GroupSystemCounterBean] = []
    for system in self.system_svc.list():
        if system.id not in custom_counts and system.id not in template_counts:
            continue
        bean = GroupSystemCounterBean.parse_obj(system)
        # Fill the custom-policy and template counts (0 when absent)
        bean.custom_policy_count = custom_counts.get(system.id, 0)
        bean.template_count = template_counts.get(system.id, 0)
        beans.append(bean)
    return beans
def check_update_policies_resource_name_and_role_scope(
    self, role, system_id: str, template_id: int, policies: List[PolicyBean], subject: Subject
):
    """
    Validate the incremental part of a policy update:
    - resource-instance names in the newly added data must be correct
    - the newly added data must fall within the role's authorization scope
    """
    # Query the already-authorized policy data
    if template_id == 0:
        # template_id == 0 means the subject's custom permissions
        old_policies = parse_obj_as(List[PolicyBean], self.policy_query_svc.list_by_subject(system_id, subject))
    else:
        authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, template_id)
        old_policies = parse_obj_as(List[PolicyBean], authorized_template.data["actions"])
    # Diff out only the newly added policy data
    added_policy_list = PolicyBeanList(system_id, policies).sub(PolicyBeanList(system_id, old_policies))
    # Verify resource-instance names of the added data are correct
    added_policy_list.check_resource_name()
    # Verify the added permissions fall within the role's management scope
    scope_checker = RoleAuthorizationScopeChecker(role)
    scope_checker.check_policies(system_id, added_policy_list.policies)
def update_policies(self, role, group_id: int, system_id: str, template_id: int, policies: List[PolicyBean]):
    """
    Update a single permission (custom or template) of a user group.
    """
    subject = Subject(type=SubjectType.GROUP.value, id=str(group_id))
    # Check added instance names and that additions stay within the role's scope
    self.check_update_policies_resource_name_and_role_scope(role, system_id, template_id, policies, subject)
    # Check that the policies match their action definitions
    self.action_check_biz.check(system_id, [ActionForCheck.parse_obj(p.dict()) for p in policies])
    policy_list = PolicyBeanList(system_id, policies, need_ignore_path=True)
    # Group permissions never expire: force the expiry to "permanent"
    for p in policy_list.policies:
        p.set_expired_at(PERMANENT_SECONDS)
    # Custom permission
    if template_id == 0:
        self.policy_operation_biz.update(system_id, subject, policy_list.policies)
    # Permission-template permission
    else:
        self.template_svc.update_template_auth(
            subject, template_id, parse_obj_as(List[Policy], policy_list.policies)
        )
def _convert_to_subject_group_beans(self, relations: List[SubjectGroup]) -> List[SubjectGroupBean]:
    """
    Join subject->group relations with the Group table and build beans.
    """
    group_ids = [int(rel.id) for rel in relations if rel.type == SubjectType.GROUP.value]
    # Relations are keyed by string id while Group.id is an int
    rel_by_id = {rel.id: rel for rel in relations}
    beans: List[SubjectGroupBean] = []
    for group in Group.objects.filter(id__in=group_ids):
        rel = rel_by_id.get(str(group.id))
        if not rel:
            # Relation disappeared between the two queries; skip
            continue
        beans.append(
            SubjectGroupBean(
                id=group.id,
                name=group.name,
                description=group.description,
                expired_at=rel.policy_expired_at,
                expired_at_display=expired_at_display(rel.policy_expired_at),
                created_time=utc_string_to_local(rel.created_at),
                department_id=rel.department_id,
                department_name=rel.department_name,
            )
        )
    return beans
def list_subject_group(self, subject: Subject, is_recursive: bool = False) -> List[SubjectGroupBean]:
    """
    List the groups the subject belongs to; `is_recursive` is passed through
    to the service layer (presumably to include department-inherited
    memberships — confirm against GroupService).
    """
    relations = self.group_svc.list_subject_group(subject, is_recursive)
    return self._convert_to_subject_group_beans(relations)
def list_subject_group_before_expired_at(self, subject: Subject, expired_at: int) -> List[SubjectGroupBean]:
    """
    List the subject's groups whose membership expires before `expired_at`.
    """
    relations = self.group_svc.list_subject_group_before_expired_at(subject, expired_at)
    return self._convert_to_subject_group_beans(relations)
def update_members_expired_at(self, group_id: int, members: List[GroupMemberExpiredAtBean]):
    """
    Update the membership expiry time of the given group members.
    """
    self.group_svc.update_members_expired_at(group_id, parse_obj_as(List[GroupMemberExpiredAt], members))
def delete(self, group_id: int):
    """
    Delete a user group and everything attached to it, atomically.

    Raises a VALIDATE_ERROR if an authorization task currently locks the group.
    """
    # Refuse to delete while an authorization is in progress for this group
    if GroupAuthorizeLock.objects.filter(group_id=group_id).exists():
        raise error_codes.VALIDATE_ERROR.format(_("用户组正在授权, 不能删除!"))
    subject = Subject(type=SubjectType.GROUP.value, id=str(group_id))
    with transaction.atomic():
        # Remove the grade-manager (role) <-> group relation
        RoleRelatedObject.objects.delete_group_relation(group_id)
        # Remove the group's permission-template authorizations
        self.template_biz.delete_template_auth_by_subject(subject)
        # Remove all of the group's custom policies
        PolicyModel.objects.filter(subject_type=subject.type, subject_id=subject.id).delete()
        # Remove the group's SaaS attributes
        self.group_attribute_svc.batch_delete_attributes([group_id])
        # Finally remove the group itself
        self.group_svc.delete(group_id)
def _convert_to_group_members(self, relations: List[SubjectGroup]) -> List[GroupMemberBean]:
    """
    Attach subject display info (name/full_name/...) to member relations.
    """
    subjects = parse_obj_as(List[Subject], relations)
    info_list = SubjectInfoList(subjects)
    members: List[GroupMemberBean] = []
    for subject, rel in zip(subjects, relations):
        info = info_list.get(subject)
        if not info:
            # Subject no longer resolvable; drop it from the listing
            continue
        members.append(
            GroupMemberBean(
                expired_at=rel.policy_expired_at,
                expired_at_display=expired_at_display(rel.policy_expired_at),
                created_time=utc_string_to_local(rel.created_at),
                department_id=rel.department_id,
                department_name=rel.department_name,
                **info.dict(),
            )
        )
    return members
def list_paging_group_member(self, group_id: int, limit: int, offset: int) -> Tuple[int, List[GroupMemberBean]]:
    """Page through group members, filling in name/full_name etc. for each member."""
    count, relations = self.group_svc.list_paging_group_member(group_id, limit, offset)
    return count, self._convert_to_group_members(relations)
def list_paging_members_before_expired_at(
    self, group_id: int, expired_at: int, limit: int = 10, offset: int = 0
) -> Tuple[int, List[GroupMemberBean]]:
    """
    Page through group members whose membership expires before `expired_at`.
    """
    count, relations = self.group_svc.list_paging_members_before_expired_at(group_id, expired_at, limit, offset)
    return count, self._convert_to_group_members(relations)
def list_pre_application_groups(self, policy_list: PolicyBeanList) -> List[int]:
    """
    Suggest groups a user could apply to join, by asking the search engine
    which groups already hold each of the wanted policies and intersecting
    the results. Engine failures are non-fatal and yield no suggestions.
    """
    system_id, policies = policy_list.system_id, policy_list.policies
    try:
        policy_resources = self.engine_svc.gen_search_policy_resources(policies)
        # Fill in the attributes of concrete resource instances
        for pr in policy_resources:
            if pr.resources:
                self._fill_resources_attribute(pr.resources)
        results = self.engine_svc.query_subjects_by_policy_resources(
            system_id, policy_resources, SubjectType.GROUP.value
        )
    except APIException:
        return []
    # Intersect the group-id sets returned for each policy resource
    common_ids = None
    for res in results:
        ids = {one["id"] for one in res}
        common_ids = ids if common_ids is None else common_ids & ids
    return [int(_id) for _id in common_ids] if common_ids else []
def _fill_resources_attribute(self, resources: List[Dict[str, Any]]):
    """
    Fill the "attribute" field of concrete resource instances (id != "*")
    that do not yet carry attributes, batching the lookups per
    (system, resource type).
    """
    need_fetch_resources = [
        resource for resource in resources if resource["id"] != "*" and not resource["attribute"]
    ]
    if not need_fetch_resources:
        return

    def group_key(resource):
        return (resource["system"], resource["type"])

    # itertools.groupby only merges *adjacent* items with equal keys, so the
    # list must be sorted by the same key first; otherwise one
    # (system, type) pair could be fetched in several partial batches.
    need_fetch_resources.sort(key=group_key)
    for (system_id, resource_type_id), parts in groupby(need_fetch_resources, key=group_key):
        self._exec_fill_resources_attribute(system_id, resource_type_id, list(parts))
def _exec_fill_resources_attribute(self, system_id: str, resource_type_id: str, resources: List[Dict[str, Any]]):
    """Fetch auth attributes for one (system, resource type) batch and fill them in."""
    # Deduplicated instance ids for the batch query
    resource_ids = list({resource["id"] for resource in resources})
    resource_info_dict = self.resource_biz.fetch_auth_attributes(
        system_id, resource_type_id, resource_ids, raise_api_exception=False
    )
    # Fill the fetched attributes; instances missing from the result are left untouched
    for resource in resources:
        _id = resource["id"]
        if not resource_info_dict.has(_id):
            continue
        attrs = resource_info_dict.get_attributes(_id, ignore_none_value=True)
        resource["attribute"] = attrs
def _check_lock_before_grant(self, group: Group, templates: List[GroupTemplateGrantBean]):
    """
    Check that the group may be authorized right now (no conflicting locks).
    """
    # Permission-template ids involved in this grant
    template_ids = [template.template_id for template in templates if template.template_id != 0]
    # Systems involved via custom permissions (template_id == 0)
    custom_action_system_ids = [template.system_id for template in templates if template.template_id == 0]
    # Reject if any involved permission template is being updated right now
    if PermTemplatePreUpdateLock.objects.filter(template_id__in=template_ids).exists():
        raise error_codes.VALIDATE_ERROR.format(_("部分权限模板正在更新, 不能授权!"))
    # Reject if a long-running task is already granting any of the involved
    # templates or custom permissions to this group
    if (
        GroupAuthorizeLock.objects.filter(group_id=group.id)
        .filter(Q(template_id__in=template_ids) | (Q(template_id=0) & Q(system_id__in=custom_action_system_ids)))
        .exists()
    ):
        raise error_codes.VALIDATE_ERROR.format(_("部分权限模板或自定义权限已经在授权中, 不能重复授权!"))
def check_before_grant(
    self, group: Group, templates: List[GroupTemplateGrantBean], role: Role, need_check_resource_name=True
):
    """
    Validate a grant of custom permissions or templates to a group:
    (1) template actions must not exceed the original template
    (2) permissions must stay within the grade manager's authorization scope
    (3) resource-instance names must be correct
    """
    subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
    # The loop handles both template grants and custom-permission grants
    for template in templates:
        action_ids = [p.action_id for p in template.policies]
        if template.template_id != 0:
            # Template grant: the action list must match the template
            self.template_check_biz.check_add_member(template.template_id, subject, action_ids)
        else:
            # Custom grant: the actions must not already be authorized
            self._valid_grant_actions_not_exists(subject, template.system_id, action_ids)
        try:
            # Verify resource-instance names match the real instance names
            if need_check_resource_name:
                template_policy_list = PolicyBeanList(system_id=template.system_id, policies=template.policies)
                template_policy_list.check_resource_name()
            # Verify the policies fall within the role's authorization scope
            scope_checker = RoleAuthorizationScopeChecker(role)
            scope_checker.check_policies(template.system_id, template.policies)
        except CodeException as e:
            # Re-raise with context about which system/template failed
            raise error_codes.VALIDATE_ERROR.format(
                _("系统: {} 模板: {} 校验错误: {}").format(template.system_id, template.template_id, e.message),
                replace=True,
            )
def _valid_grant_actions_not_exists(self, subject: Subject, system_id, action_ids: List[str]):
    """
    Validate that none of the actions is already authorized for the subject
    (i.e. the grant really is a new custom permission, not a duplicate).
    """
    policy_list = self.policy_query_svc.new_policy_list_by_subject(system_id, subject)
    for action_id in action_ids:
        if policy_list.get(action_id):
            raise error_codes.VALIDATE_ERROR.format(_("系统: {} 的操作: {} 权限已存在").format(system_id, action_id))
def _gen_grant_lock(self, subject: Subject, template: GroupTemplateGrantBean, uuid: str) -> GroupAuthorizeLock:
    """
    Build (without saving) the authorization-lock row for one grant unit.
    """
    # Group permissions never expire: force the expiry to "permanent"
    for p in template.policies:
        p.set_expired_at(PERMANENT_SECONDS)
    lock = GroupAuthorizeLock(
        template_id=template.template_id, group_id=int(subject.id), system_id=template.system_id, key=uuid
    )
    # The policies to grant are stashed on the lock; presumably consumed by
    # the long-running authorization task — confirm against the task code.
    lock.data = {"actions": [p.dict() for p in template.policies]}  # type: ignore
    return lock
def grant(self, | |
<reponame>pkgw/vernon
# -*- mode: python; coding: utf-8 -*-
# Copyright 2015-2018 <NAME> and collaborators.
# Licensed under the MIT License.
"""Different particle distributions.
These are all expressed relative to the magnetic field coordinate system.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
TorusDistribution
WasherDistribution
PancakeTorusDistribution
PancakeWasherDistribution
PexpPancakeWasherDistribution
GriddedDistribution
DG83Distribution
'''.split()
import numpy as np
import six
from six.moves import range
from pwkit import astutil, cgs
from pwkit.astutil import halfpi, twopi
from pwkit.numutil import broadcastize
from .bases import Distribution
from .config import Configuration
from .geometry import sph_to_cart, BodyConfiguration
class TorusDistribution(Distribution):
    """A uniformly filled torus where the parameters of the electron energy
    distribution are fixed."""
    __section__ = 'torus-distribution'
    major_radius = 3.0
    """"Major radius", I guess, in units of the body's radius."""
    minor_radius = 1.0
    """"Minor radius", I guess, in units of the body's radius."""
    n_e = 1e4
    """The density of energetic electrons in the torus, in units of total
    electrons per cubic centimeter.
    """
    power_law_p = 3
    "The power-law index of the energetic electrons, such that N(>E) ~ E^(-p)."
    pitch_angle_k = 1
    "The power-law index of the pitch angle distribution in sin(theta)."
    _parameter_names = ['n_e', 'p', 'k']
    @broadcastize(3, (0, 0, 0))
    def get_samples(self, mlat, mlon, L, just_ne=False):
        """Sample properties of the electron distribution at the specified locations
        in magnetic field coordinates. Arguments are magnetic latitude,
        longitude, and McIlwain L parameter.
        Returns: (n_e, p, k), where
        n_e
            Array of electron densities corresponding to the provided coordinates.
            Units of electrons per cubic centimeter.
        p
            Array of power-law indices of the electrons at the provided coordinates.
        k
            Array of pitch-angle power-law indices at the provided coordinates.
        """
        # NOTE(review): `just_ne` is accepted but not used in this body — confirm intent.
        # Dipole field line: radial distance r = L cos^2(mlat)
        r = L * np.cos(mlat)**2
        x, y, z = sph_to_cart(mlat, mlon, r)
        # Implicit equation of a torus about the z axis: q < 0 inside.
        # Thanks, Internet! (Ironically, the first formula I copied was incorrect!)
        a = self.major_radius
        b = self.minor_radius
        q = (x**2 + y**2 + z**2 - (a**2 + b**2))**2 - 4 * a**2 * (b**2 - z**2)
        inside = (q < 0)
        # Density is n_e inside the torus and zero outside; p and k are uniform.
        n_e = np.zeros(mlat.shape)
        n_e[inside] = self.n_e
        p = np.empty(mlat.shape)
        p.fill(self.power_law_p)
        k = np.empty(mlat.shape)
        k.fill(self.pitch_angle_k)
        return n_e, p, k
class WasherDistribution(Distribution):
    """A hard-edged "washer" shape."""
    __section__ = 'washer-distribution'
    r_inner = 2.0
    "Inner radius, in units of the body's radius."
    r_outer = 7.0
    "Outer radius, in units of the body's radius."
    thickness = 0.7
    """Washer thickness, in units of the body's radius. Note that the washer
    will extend in the magnetic z coordinate from ``-thickness/2`` to
    ``+thickness/2``."""
    n_e = 1e5
    """The density of energetic electrons in the washer, in units of total
    electrons per cubic centimeter."""
    power_law_p = 3.0
    "The power-law index of the energetic electrons, such that N(>E) ~ E^(-p)."
    pitch_angle_k = 1.0
    "The power-law index of the pitch angle distribution in sin(theta)."
    radial_concentration = 0.0
    r"""A power-law index giving the degree to which n_e increases toward the
    inner edge of the washer:
    n_e(r) \propto [(r_out - r) / (r_out - r_in)]^radial_concentration
    Zero implies a flat distribution; 1 implies a linear increase from outer
    to inner. The total number of electrons in the washer is conserved.
    """
    _parameter_names = ['n_e', 'p', 'k']
    # Lazily computed normalization constant; cached on the instance
    # (assignment below shadows this class attribute).
    _density_factor = None
    @broadcastize(3, (0, 0, 0))
    def get_samples(self, mlat, mlon, L, just_ne=False):
        """Sample properties of the electron distribution at the specified locations
        in magnetic field coordinates. Arguments are magnetic latitude,
        longitude, and McIlwain L parameter.
        Returns: (n_e, p, k), where
        n_e
            Array of electron densities corresponding to the provided coordinates.
            Units of electrons per cubic centimeter.
        p
            Array of power-law indices of the electrons at the provided coordinates.
        k
            Array of pitch-angle power-law indices at the provided coordinates.
        """
        # NOTE(review): `just_ne` is accepted but not used in this body — confirm intent.
        if self._density_factor is None:
            # We want the total number of electrons to stay constant if
            # radial_concentration changes. In the simplest case,
            # radial_concentration is zero, n_e is spatially uniform, and
            #
            #   N = n_e * thickness * pi * (r_outer**2 - r_inner**2).
            #
            # In the less trivial case, n_e(r) ~ ((r_out - r)/(r_out -
            # r_in))**c. Denote the constant of proportionality
            # `density_factor`. If you work out the integral for N in the
            # generic case and simplify, you get the following. Note that if c
            # = 0, you get density_factor = n_e as you would hope.
            c = self.radial_concentration
            numer = float(self.n_e) * (self.r_outer**2 - self.r_inner**2)
            denom = (2 * (self.r_outer - self.r_inner) * \
                     ((c + 1) * self.r_inner + self.r_outer) / ((c + 1) * (c + 2)))
            self._density_factor = numer / denom
        # Dipole field line: radial distance r = L cos^2(mlat)
        r = L * np.cos(mlat)**2
        x, y, z = sph_to_cart(mlat, mlon, r)
        # Inside test: cylindrical annulus between r_inner and r_outer,
        # limited to |z| < thickness/2
        r2 = x**2 + y**2
        inside = (r2 > self.r_inner**2) & (r2 < self.r_outer**2) & (np.abs(z) < 0.5 * self.thickness)
        n_e = np.zeros(mlat.shape)
        n_e[inside] = self._density_factor * ((self.r_outer - r[inside]) /
                                              (self.r_outer - self.r_inner))**self.radial_concentration
        p = np.empty(mlat.shape)
        p.fill(self.power_law_p)
        k = np.empty(mlat.shape)
        k.fill(self.pitch_angle_k)
        return n_e, p, k
class PancakeTorusDistribution(Distribution):
    """A distribution where the overall particle distribution is a uniform torus,
    but the parameters smoothly interpolate to different values in a "pancake" along
    the magnetic equator.
    This is meant to provide a relatively simple analytic approximation to a
    Jupiter-like particle distribution, which more or less has a two-component
    particle distribution consisting of a more isotropic population (the
    torus-y part) and an equatorial population (the pancake-y part).
    """
    __section__ = 'pancake-torus-distribution'
    major_radius = 3.0
    """"Major radius" of the torus shape, I guess, in units of the body's
    radius.
    """
    minor_radius = 1.0
    """"Minor radius" of the torus shape, I guess, in units of the body's
    radius.
    """
    n_e_torus = 1e4
    """The density of energetic electrons in the torus component, in units of
    total electrons per cubic centimeter.
    """
    n_e_pancake = 1e6
    """The density of energetic electrons in the pancake component, in units of
    total electrons per cubic centimeter. The modeled density will interpolate
    smoothly to this value in the "pancake" zone.
    """
    power_law_p = 3
    "The power-law index of the energetic electrons, such that N(>E) ~ E^(-p)."
    pitch_angle_k_torus = 1
    """The power-law index of the pitch angle distribution in sin(theta) in the
    torus component.
    """
    pitch_angle_k_pancake = 9
    """The power-law index of the pitch angle distribution in sin(theta) in the
    pancake component. The modeled power law index will interpolate smoothly
    to this value in the "pancake" zone.
    """
    pancake_fwhm = 0.4
    """The FWHM of the pancake layer, in units of the body's radius. The pancake
    zone is defined as having a profile of ``clipped_cos(z_norm)^5``, where z
    is the magnetic z coordinate (i.e., vertical displacement out of the
    magnetic equator) normalized such that the full-width at half-maximum
    (FWHM) of the resulting profile is the value specified here. The ``cos``
    function is clipped in the sense that values of z far beyond the equator
    are 0.
    """
    _parameter_names = ['n_e', 'p', 'k']
    @broadcastize(3, (0, 0, 0))
    def get_samples(self, mlat, mlon, L, just_ne=False):
        """Return (n_e, p, k) arrays sampled at the given magnetic coordinates."""
        # Dipole field line: radial distance r = L cos^2(mlat)
        r = L * np.cos(mlat)**2
        x, y, z = sph_to_cart(mlat, mlon, r)
        # Implicit equation of a torus about the z axis: q < 0 inside
        a = self.major_radius
        b = self.minor_radius
        q = (x**2 + y**2 + z**2 - (a**2 + b**2))**2 - 4 * a**2 * (b**2 - z**2)
        inside_torus = (q < 0)
        # 1.0289525193081477 = 2*arccos(2**(-1/5)): scales z so that the
        # cos(z_norm)**5 profile reaches half-maximum at z = ±pancake_fwhm/2
        z_norm = z[inside_torus] * 1.0289525193081477 / self.pancake_fwhm
        pancake_factor = np.cos(z_norm)**5
        # Clip the profile to zero beyond the first zero of the cosine
        pancake_factor[np.abs(z_norm) > 0.5 * np.pi] = 0.
        # Interpolate density and pitch-angle index between the torus and
        # pancake values; outside the torus n_e is zero and k keeps the
        # torus value
        n_e = np.zeros(mlat.shape)
        n_e[inside_torus] = self.n_e_torus + (self.n_e_pancake - self.n_e_torus) * pancake_factor
        p = np.empty(mlat.shape)
        p.fill(self.power_law_p)
        k = np.empty(mlat.shape)
        k.fill(self.pitch_angle_k_torus)
        k[inside_torus] = self.pitch_angle_k_torus + \
            (self.pitch_angle_k_pancake - self.pitch_angle_k_torus) * pancake_factor
        return n_e, p, k
class PancakeWasherDistribution(Distribution):
"""A distribution where the overall particle distribution is a washer but the
parameters smoothly interpolate to different values in a "pancake" along
the magnetic equator.
Plus some other bells and whistles to vaguely match the overall trends
seen in Jupiter's magnetosphere as described in de Pater+
(2003Icar..163..434D).
"""
__section__ = 'pancake-washer-distribution'
r_inner = 2.0
"Inner radius, in units of the body's radius."
r_outer = 7.0
"Outer radius, in units of the body's radius."
thickness = 0.7
"""Washer thickness, in units of the body's radius. Note that the washer will
extend in the magnetic z coordinate from ``-thickness/2`` to
``+thickness/2``, and that this parameter must be set to 2 for the washer
to have the same height as the body.
"""
n_e_washer_max = 1e4
"""The density of energetic electrons in the washer | |
<reponame>IBM/DPM360<gh_stars>10-100
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
## Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from six import iteritems
from toolz import functoolz
from torch import is_tensor
import torch as T
from lightsaber.data_utils import utils as du
from lightsaber import constants as C
import warnings
import os
import logging
log = logging.getLogger()
idx_slice = pd.IndexSlice
# -----------------------------------------------------------------------------
# Some basic utils
# ----------------------------------------------------------------------------
def identity_nd(*args):
    """Variadic identity: hand back whatever positional arguments were given.

    Args:
        args: variable arguments

    Returns:
        args: the same arguments, unchanged (as a tuple)
    """
    return args
def identity_2d(x, y):
    """
    Identity function for 2 variables: returns both inputs unchanged.

    Parameters
    ----------
    x : object
        first param
    y : object
        second param

    Returns
    -------
    (x, y) : tuple
        the two parameters, unchanged
    """
    return x, y
def identity_3d(x, y, z):
    """Identity function for 3 variables: returns (x, y, z) unchanged."""
    return x, y, z
def is_re(s: str,
          strict: bool = False) -> bool:
    """checks if `s` is a valid regex.

    Parameters
    ----------
    s: str
        parameter to check for being a valid regex
    strict: bool
        if strict mode is used, anything except a regex compile error is
        re-raised; otherwise the function warns and returns False

    Returns
    -------
    bool
        returns True if the passed string `s` is a valid reg-ex

    Note
    ----
    ref: https://stackoverflow.com/a/19631067
    """
    try:
        re.compile(s)
        is_valid = True
    except re.error:
        # Compile error: definitively not a valid regex
        is_valid = False
    except Exception as e:
        if strict:
            # Re-raise the original exception instead of wrapping it in a
            # bare Exception(e), which lost the type, message context and
            # traceback of the underlying error.
            raise
        warnings.warn(f"regex checking incomplete... error while checking: {e}. assuming invalid regex. use strict=True to throw exception")
        is_valid = False
    return is_valid
# -----------------------------------------------------------------------------
# List of pre-defined filters
# ----------------------------------------------------------------------------
def _get_flatten_tx(data, method):
'''
Assigns a flatten function to each col in the data dataframe
TODO: MG I think this function needs some work
'''
if 'sum' in method:
cols = {col: 'sum' for col in data
if (col.startswith('ICD') or
col.startswith('PROC') or
col.startswith('LENGTH'))}
for i in data.columns:
if i not in cols:
if i.startswith('EMB'):
cols[i] = 'mean'
else:
cols[i] = 'max'
if 'max' in method:
cols = dict()
for i in data.columns:
if i.startswith('EMB'):
cols[i] = 'mean'
else:
cols[i] = 'max'
if 'mean' in method:
cols = {col: 'mean' for col in data
if (col.startswith('ICD') or
col.startswith('PROC') or
col.startswith('LENGTH') or
col.startswith('EMB'))}
for i in data.columns:
if i not in cols:
cols[i] = 'max'
return cols
@functoolz.curry
def filter_fillna(data, target, fill_value=0., time_order_col=None):
    """
    Filter to impute missing values: per-subject forward fill (time-ordered
    when `time_order_col` is given), then fill remaining NA with `fill_value`.
    Returns the (data, target) pair so filters can be chained.
    """
    data = data.copy()
    idx_cols = data.index.names
    if time_order_col is not None:
        # NOTE(review): idx_cols is a pandas FrozenList; the try branch
        # presumably works when time_order_col is already a list and the
        # fallback wraps a scalar column name — confirm. The bare `except:`
        # should at least be `except Exception:`.
        try:
            sort_cols = idx_cols + time_order_col
        except:
            sort_cols = idx_cols + [time_order_col]
    else:
        sort_cols = idx_cols
    # Forward-fill within each top-level index group after time-ordering
    data.update(data.reset_index()
                .sort_values(sort_cols)
                .groupby(idx_cols[0])
                .ffill())
    data.fillna(fill_value, inplace=True)
    return data, target
@functoolz.curry
def filter_flatten(data, target, method='max'):
    """Filter that collapses repeated index entries by aggregating each
    group of rows with `method` (e.g. 'max', 'sum', 'mean')."""
    log.debug("Starting to flatten")
    flattened = (data.groupby(data.index.names)
                 .agg(method))
    log.debug("Done in flatten")
    return flattened, target
@functoolz.curry
def filter_flatten_filled_drop_cols(data, target,
                                    aggfunc="sum",
                                    fill_value=0.0,
                                    cols_to_drop=C.DEFAULT_DROP_COLS):
    """Composite filter: drop `cols_to_drop`, fill NA with `fill_value`,
    then flatten repeated index entries with `aggfunc`."""
    data = data.drop(columns=cols_to_drop, errors='ignore')
    # Fill missing values first so the aggregation sees no NA
    data, target = filter_fillna(data, target, fill_value=fill_value)
    # Aggregate duplicate index entries
    data, target = filter_flatten(data, target, method=aggfunc)
    return data, target
@functoolz.curry
def filter_preprocessor(data, target, cols=None, preprocessor=None, refit=False):
    """Apply (and optionally fit) a sklearn-style preprocessor to `cols` of
    `data`, leaving the remaining columns and the column order untouched."""
    if preprocessor is not None:
        all_columns = data.columns
        index = data.index
        # Extracting the columns to fit
        if cols is None:
            cols = all_columns
        _oCols = all_columns.difference(cols)
        xData = data[cols]
        # If fit required fitting it
        if refit:
            preprocessor.fit(xData)
            log.info(f'Fitting pre-proc: {preprocessor}')
        # Transforming data to be transformed
        try:
            xData = preprocessor.transform(xData)
        except NotFittedError:
            # NOTE(review): NotFittedError (sklearn.exceptions) is not
            # imported anywhere in the visible module — this handler would
            # itself raise NameError. Confirm the import exists upstream.
            raise Exception(f"{preprocessor} not fitted. pass fitted preprocessor or set refit=True")
        xData = pd.DataFrame(columns=cols, data=xData, index=index)
        # Merging other columns if required
        if not _oCols.empty:
            tmp = pd.DataFrame(data=data[_oCols].values,
                               columns=_oCols,
                               index=index)
            xData = pd.concat((tmp, xData), axis=1)
        # Re-ordering the columns to original order
        data = xData[all_columns]
    return data, target
def filt_get_last_index(data, target,
                        idx_col=None,
                        min_occurence=4
                        ):
    """Filter to get last index claim for each patient.
    Filters are designed to be composible functions such that one can chain filters.

    Outputs
        filtered `data` and `target` with entries for only the last index claim

    Parameters
    ----------
    data : DataFrame
        feature data
    target : DataFrame
        target data
    idx_col: List of str|int, optional
        index columns as (patient id, claim order); defaults to
        ['DESY_SORT_KEY', 'INDEX_CLAIM_ORDER']
    min_occurence: int
        number of minimum occurence required for an instance to be included

    Returns
    -------
    data: DataFrame
    target: DataFrame
    """
    # Avoid a mutable default argument: resolve the default inside the call.
    if idx_col is None:
        idx_col = ['DESY_SORT_KEY', 'INDEX_CLAIM_ORDER']
    # last index claim for each patient
    last_claim_idx = (data.reset_index()[idx_col].groupby([idx_col[0]])  # Group by patient id
                      .max()[idx_col[1]].to_frame()  # take last index claim order for a patient
                      .reset_index().set_index(idx_col))  # set index to patient id and index_claim_order
    # filter data and keep only last index claim for each patient and its history
    data = data[data.reset_index().set_index(idx_col).index.isin(last_claim_idx.index)]
    # remove all patients (last claim index) who have too few claims, as they
    # are not useful (e.g. for med2vec)
    temp = data.reset_index().groupby(idx_col).count().iloc[:, 0]
    useful_claims_idx = temp[temp >= min_occurence].index
    data = data[data.index.isin(useful_claims_idx)]
    if target is not None:
        target = target[target.index.isin(data.index)]
    return data, target
# -----------------------------------------------------------------------------
# List of pre-defined transforms
# ----------------------------------------------------------------------------
@functoolz.curry
def transform_default(data, time_order_col, fill_value=0.):
    """Deprecated default transform: drop the time-order column and impute NaNs.

    Equivalent to chaining transform_drop_cols (on `time_order_col`) and
    transform_fillna; kept only for backward compatibility.

    Parameters
    ----------
    data : DataFrame
    time_order_col :
        column(s) holding the time ordering, removed from the output
    fill_value :
        constant used for values that can be neither forward- nor back-filled
    """
    import warnings
    # BUG FIX: this used to `raise DeprecationWarning(...)`, which made the
    # function unconditionally fail and left the body below unreachable.
    # Emit the warning instead so existing callers keep working.
    warnings.warn("deprecated. this will be dropped in v0.3. use [transform_drop_cols, transform_fill]",
                  DeprecationWarning)
    data = (data.drop(columns=time_order_col)  # removing time order col
            .fillna(method='ffill')            # filling up NaN forward
            .fillna(method='bfill')            # then backward
            .fillna(fill_value)                # then with the constant
            )
    return data
@functoolz.curry
def transform_drop_cols(data, cols_to_drop=C.DEFAULT_DROP_COLS):
    """Drop `cols_to_drop` from `data`; columns that are absent are ignored."""
    return data.drop(columns=cols_to_drop, errors='ignore')
@functoolz.curry
def transform_fillna(data, fill_value=0.):
    """Impute missing values: forward-fill, then back-fill, then `fill_value`."""
    data = data.fillna(method='ffill')  # propagate last observation forward
    data = data.fillna(method='bfill')  # fill leading NaNs from the first observation
    data = data.fillna(fill_value)      # whatever is left has no observation at all
    return data
@functoolz.curry
def transform_flatten(data, method='max'):
    """Flatten the data by aggregating each column.

    Parameters
    ----------
    data : feature values
    method : str
        aggregation name handed to `_get_flatten_tx` (e.g. 'sum', 'max', 'mean')

    Returns
    -------
    flattened data
    """
    column_aggregators = _get_flatten_tx(data, method)
    return data.apply(column_aggregators)
# Default pipeline pieces used when a caller does not supply its own:
# drop the standard junk columns, then impute NaNs (ffill/bfill/constant).
DEFAULT_TRANSFORM = [transform_drop_cols, transform_fillna]
# no-op filter (identity_nd — presumably a pass-through; confirm at its definition)
DEFAULT_FILTER = [identity_nd]
# -----------------------------------------------------------------------------
# Dataset class and its uitls
# ----------------------------------------------------------------------------
class EmptyDataset(Dataset):
    """A Dataset containing no samples (``len() == 0``)."""
    def __len__(self):
        # no samples to serve
        return 0
class BaseDataset(Dataset):
def __init__(self, tgt_file, feat_file,
             idx_col, tgt_col,
             feat_columns=None, time_order_col=None,
             category_map=C.DEFAULT_MAP,
             transform=DEFAULT_TRANSFORM, filter=DEFAULT_FILTER,
             device='cpu'):
    """Base dataset class

    Parameters
    ----------
    tgt_file:
        target file path
    feat_file:
        feature file path
    idx_col: str or List[str]
        index columns in the data. present in both `tgt_file` and `feat_file`
    tgt_col: str or List[str]
        target column present in `tgt_file`
    feat_columns:
        feature columns to select from. either a single regex or list of columns (partial regex that matches the complete column name is ok. e.g. `CCS` would only match `CCS` whereas `CCS.*` will match `CCS_XYZ` and `CCS`)
        Default: `None` -> implies all columns
    time_order_col:
        column(s) that signify the time ordering for a single example.
        Default: `None` -> implies no columns
    category_map:
        dictionary of column maps
    transform: single callable or list/tuple of callables
        how to transform data. if list of callables provided eg `[f, g]`, `g(f(x))` used
        Default: drop `lightsaber.constants::DEFAULT_DROP_COLS` and fillna
    filter: single callable or list/tuple of callables
        how to filter data. if list of callables provided eg `[f, g]`, `g(f(x))` used
        Default: no operation
    device: str
        valid pytorch device. `cpu` or `gpu`

    Examples
    --------
    Example of feature columns.

    >>> df = pd.DataFrame(columns = ['CCS_128', 'CCS', 'AGE', 'GENDER'])
    >>> feat_columns = ['CCS_.*', 'AGE']
    >>> BaseDataset._select_features(df, feat_columns)
    ['CCS_128', 'AGE']
    >>> feat_columns = ['CCS', 'AGE'] # would select 'CCS' and 'AGE'
    >>> BaseDataset._select_features(df, feat_columns)
    ['CCS', 'AGE']
    """
    # raw configuration, stashed for read_data/apply_filters to use
    self._tgt_file = tgt_file
    self._feat_file = feat_file
    self._idx_col = idx_col
    self._tgt_col = tgt_col
    self._feat_columns = feat_columns
    self._time_order_col = time_order_col
    # manual=True keeps the filters as a list rather than composing them —
    # presumably they are applied one at a time in apply_filters; confirm there.
    self._filter = self._compose(filter, manual=True)
    # reading data
    self.read_data()
    # apply filters on datasets
    self.apply_filters()
    # Handle categorical columns (one-hot encoding driven by `category_map`)
    self.data = self.one_hot_encode(self.data, category_map)
    # Book-keeping of number of instances: one entry per unique index value
    self.sample_idx = self.data.index.to_series().drop_duplicates()
    # Runtime configurations
    self.device = device
    # transforms are collapsed into a single callable (see _compose)
    self._transform = self._compose(transform)
    return
@classmethod
def _compose(cls, obj, manual=False):
    """Normalize `obj` into a callable, or into a list of callables when `manual`.

    - ``None``            -> the identity function
    - list/tuple          -> composed into one callable via functoolz.compose,
                             unless `manual` is set (then returned untouched)
    - single callable     -> wrapped in a one-element list when `manual`,
                             otherwise returned as-is
    """
    if obj is None:
        obj = identity_nd
    is_sequence = isinstance(obj, (list, tuple))
    if is_sequence and not manual:
        # collapse the sequence of callables into a single one
        obj = functoolz.compose(*obj)
    elif not is_sequence and manual:
        obj = [obj]
    return obj
@classmethod
def _select_features(cls, data, columns):
if columns is not None:
if is_re(columns):
_feat_re = columns
else:
| |
Parchment": 0xEFE3CA,
"Ivory Ridge": 0xD9C9B8,
"Ivory Steam": 0xF0EADA,
"Ivory Stone": 0xEEE1CC,
"Ivory Tassel": 0xF8EAD8,
"Ivory Tower": 0xFBF3F1,
"Ivy": 0x226C63,
"Ivy Enchantment": 0x93A272,
"Ivy Garden": 0x818068,
"Ivy Green": 0x585442,
"Ivy League": 0x007958,
"Ivy Topiary": 0x67614F,
"Ivy Wreath": 0x708D76,
"Iwai Brown": 0x6B6F59,
"Iwaicha Brown": 0x5E5545,
"Iyanden Darksun": 0xA59A59,
"Izmir Pink": 0xCEB0B5,
"Izmir Purple": 0x4D426E,
"J's Big Heart": 0xA06856,
"<NAME>": 0xAD6D68,
"Jabłoński Grey": 0x536871,
"Jacaranda": 0xF9D7EE,
"Jacaranda Jazz": 0x6C70A9,
"Jacaranda Light": 0xA8ACB7,
"Jacaranda Pink": 0xC760FF,
"Jacarta": 0x440044,
"Jacey's Favorite": 0xBCACCD,
"Jack and Coke": 0x920F0E,
"Jack Bone": 0x869F69,
"Jack Frost": 0xDAE6E3,
"Jack Rabbit": 0xC0B2B1,
"Jack-o": 0xFB9902,
"Jack-O-Lantern": 0xD37A51,
"Jackal": 0xA9A093,
"Jackfruit": 0xF7C680,
"Jacko Bean": 0x413628,
"Jackpot": 0xD19431,
"<NAME>ique": 0xC3BDA9,
"Jacksons Purple": 0x3D3F7D,
"Jacobean Lace": 0xE4CCB0,
"Jacqueline": 0x5D4E50,
"Jacuzzi": 0x007CAC,
"Jade": 0x00A86B,
"Jade Bracelet": 0xC2D7AD,
"Jade Cream": 0x60B892,
"Jade Dragon": 0x6AA193,
"Jade Dust": 0xCEDDDA,
"Jade Glass": 0x00CED1,
"Jade Gravel": 0x0ABAB5,
"Jade Green": 0x779977,
"Jade Jewel": 0x247E81,
"Jade Light Green": 0xC1CAB7,
"Jade Lime": 0xA1CA7B,
"Jade Mist": 0xD6E9D7,
"Jade Mountain": 0x34C2A7,
"Jade Mussel Green": 0x166A45,
"Jade Orchid": 0x00AAAA,
"Jade Powder": 0x2BAF6A,
"Jade Shard": 0x017B92,
"Jade Spell": 0xC1E5D5,
"Jade Stone Green": 0x74BB83,
"Jade Tinge": 0xBBCCBC,
"Jaded": 0x0092A1,
"Jaded Clouds": 0xAEDDD3,
"Jaded Ginger": 0xCC7766,
"Jadeite": 0x38C6A1,
"Jadesheen": 0x77A276,
"Jadite": 0x61826C,
"Jaffa": 0xE27945,
"Jaffa Orange": 0xD86D39,
"Jagdwurst": 0xFFCCCB,
"Jagged Ice": 0xCAE7E2,
"Jagger": 0x3F2E4C,
"Jaguar": 0x29292F,
"Jaguar Rose": 0xF1B3B6,
"Jaipur Pink": 0xD0417E,
"Jakarta": 0xEFDDC3,
"Jakarta Skyline": 0x3D325D,
"Jalapeño": 0x9A8D3F,
"Jalapeño Bouquet": 0x576648,
"Jalapeño Red": 0xB2103C,
"Jam Session": 0xD4CFD6,
"Jama Masjid Taupe": 0xB38B6D,
"Jamaica Bay": 0x95CBC4,
"Jamaican Dream": 0x04627A,
"Jamaican Jade": 0x64D1BE,
"Jamaican Sea": 0x26A5BA,
"Jambalaya": 0xF7B572,
"<NAME>": 0xF2E3B5,
"Janemba Red": 0xFF2211,
"Janey's Party": 0xCEB5C8,
"Janitor": 0x2266CC,
"Janna": 0xF4EBD3,
"January Blue": 0x00A1B9,
"January Dawn": 0xDFE2E5,
"January Frost": 0x99C1DC,
"January Garnet": 0x7B4141,
"Japan Blush": 0xDDD6F3,
"Japanese Bonsai": 0x829F96,
"Japanese Carmine": 0x9F2832,
"Japanese Coral": 0xC47A88,
"Japanese Cypress": 0x965036,
"Japanese Fern": 0xB5B94C,
"Japanese Horseradish": 0xA8BF93,
"Japanese Indigo": 0x264348,
"Japanese Iris": 0x7F5D3B,
"Japanese Kimono": 0xCC6358,
"Japanese Koi": 0xDB7842,
"Japanese Laurel": 0x2F7532,
"Japanese Maple": 0x780109,
"Japanese Poet": 0xC4BAB7,
"Japanese Rose Garden": 0xE4B6C4,
"Japanese Sable": 0x313739,
"Japanese Violet": 0x5B3256,
"Japanese Wax Tree": 0xB77B57,
"Japanese White": 0xEEE6D9,
"Japanese Wineberry": 0x522C35,
"Japanese Yew": 0xD8A373,
"Japonica": 0xCE7259,
"Jardin": 0xBDD0AB,
"J<NAME>": 0xC6CAA7,
"Jargon Jade": 0x53A38F,
"Jarrah": 0x827058,
"Jasmine": 0xFFF4BB,
"Jasmine Flower": 0xF4E8E1,
"Jasmine Green": 0x7EC845,
"Jasmine Hollow": 0x7E7468,
"Jasper": 0xD73B3E,
"<NAME>": 0xE7C89F,
"Jasper Green": 0x57605A,
"Jasper Orange": 0xDE8F4E,
"Jasper Park": 0x4A6558,
"Jasper Stone": 0x8D9E97,
"Java": 0x259797,
"Jay Bird": 0x50859E,
"Jay Wing Feathers": 0x7994B5,
"Jazlyn": 0x464152,
"Jazz": 0x5F2C2F,
"Jazz Age Blues": 0x3B4A6C,
"Jazz Age Coral": 0xF1BFB1,
"Jazz Blue": 0x1A6A9F,
"Jazz Tune": 0x9892A8,
"Jazzberry Jam": 0x674247,
"Jazzercise": 0xB6E12A,
"Jazzy": 0xB61C50,
"Jazzy Jade": 0x55DDCC,
"Jealous Jellyfish": 0xBB0099,
"Jealousy": 0x7FAB60,
"Jean Jacket Blue": 0x7B90A2,
"Jeans Indigo": 0x6D8994,
"Jedi Night": 0x041108,
"Jefferson Cream": 0xF1E4C8,
"Jelly Bean": 0x44798E,
"Jelly Berry": 0xEE1177,
"Jelly Slug": 0xDE6646,
"Jelly Yogurt": 0xEDE6D9,
"Jellybean Pink": 0x9B6575,
"Jellyfish Blue": 0x95CAD0,
"Jellyfish Sting": 0xEE6688,
"Jemima": 0xF6D67F,
"Jerboa": 0xDEB887,
"<NAME>": 0x4D8681,
"Jersey Cream": 0xF5DEBB,
"Jess": 0x25B387,
"Jester Red": 0x9E1030,
"Jet": 0x343434,
"Jet Black": 0x2D2C2F,
"Jet d'Eau": 0xD1EAEC,
"Jet Fuel": 0x575654,
"Jet Grey": 0x9D9A9A,
"Jet Set": 0x262C2A,
"Jet Ski": 0x5492AF,
"Jet Stream": 0xBBD0C9,
"Jet White": 0xF2EDE2,
"Jetski Race": 0x005D96,
"Jetstream": 0xB0D2D6,
"Jewel": 0x136843,
"Jewel Caterpillar": 0xD374D5,
"Jewel Cave": 0x3C4173,
"Jewel Weed": 0x46A795,
"Jewellery White": 0xCED6E6,
"Jewett White": 0xE6DDCA,
"Jigglypuff": 0xFFAAFF,
"Jimbaran Bay": 0x3D5D64,
"Jīn Huáng Gold": 0xF5D565,
"Jīn Sè Gold": 0xA5A502,
"Jīn Zōng Gold": 0x8E7618,
"Jinza Safflower": 0xEE827C,
"Jinzamomi Pink": 0xF7665A,
"Jitterbug": 0xBAC08A,
"Jitterbug Jade": 0x019D6E,
"Jitterbug Lure": 0x8DB0AD,
"Jittery Jade": 0x77EEBB,
"Job's Tears": 0x005B7A,
"Jocose Jade": 0x77CC99,
"Jocular Green": 0xCCE2CA,
"Jodhpur Blue": 0x9BD7E9,
"Jodhpur Tan": 0xDAD1C8,
"Jodhpurs": 0xEBDCB6,
"Jogging Path": 0xC0B9A9,
"<NAME>": 0xEEFF22,
"<NAME>": 0xBC86AF,
"Jojoba": 0xDABE81,
"Jokaero Orange": 0xEA5505,
"Joker's Smile": 0xD70141,
"Jolly Green": 0x5E774A,
"Jolly Jade": 0x77CCBB,
"Jonquil": 0xEEF293,
"Jonquil Trail": 0xF7D395,
"Jordan Jazz": 0x037A3B,
"Jordy Blue": 0x7AAAE0,
"Josephine": 0xD3C3BE,
"Joshua Tree": 0x7FB377,
"Journal White": 0xE6D3B2,
"Journey to the Sky": 0xCDECED,
"Journey's End": 0xBAC9D4,
"Joust Blue": 0x55AAFF,
"Jovial": 0xEEB9A7,
"Jovial Jade": 0x88DDAA,
"Joyful": 0xF6EEC0,
"Joyful Lilac": 0xE4D4E2,
"Joyful Orange": 0xFA9335,
"Joyful Poppy": 0xEBADA5,
"Joyful Ruby": 0x503136,
"Joyful Tears": 0x006669,
"Joyous": 0xFFEEB0,
"Joyous Song": 0x5B365E,
"<NAME>": 0xF9900F,
"Jube": 0x4B373C,
"Jube Green": 0x78CF86,
"Jubilant Jade": 0x44AA77,
"Jubilation": 0xFBDD24,
"Jubilee": 0x7E6099,
"Jubilee Grey": 0x7C7379,
"<NAME>": 0x473739,
"Judge Grey": 0x5D5346,
"Jugendstil Green": 0xC3C8B3,
"Jugendstil Pink": 0x9D6375,
"Jugendstil Turquoise": 0x5F9B9C,
"Juggernaut": 0x255367,
"Juice Violet": 0x442238,
"Juicy Details": 0xD9787C,
"Juicy Fig": 0x7D6C4A,
"Juicy Jackfruit": 0xEEDD33,
"Juicy Lime": 0xB1CF5D,
"Juicy Mango": 0xFFD08D,
"Juicy Passionfruit": 0xF18870,
"Julep": 0x57AA80,
"Julep Green": 0xC7DBD9,
"Jules": 0xA73940,
"July": 0x8BD2E3,
"July Ruby": 0x773B4A,
"Jumbo": 0x878785,
"June": 0x9BC4D4,
"June Berry": 0x9B96B6,
"June Bud": 0xBDDA57,
"June Bug": 0x264A48,
"June Bugs": 0xBB6633,
"June Day": 0xFFE182,
"June Vision": 0xF1F1DA,
"Juneberry": 0x775496,
"Jungle": 0x00A466,
"Jungle Adventure": 0x446D46,
"Jungle Book Green": 0x366C4E,
"Jungle Camouflage": 0x53665A,
"Jungle Civilization": 0x69673A,
"Jungle Cloak": 0x686959,
"Jungle Cover": 0x565042,
"Jungle Expedition": 0xB49356,
"Jungle Green": 0x048243,
"Jungle Juice": 0xA4C161,
"Jungle Khaki": 0xC7BEA7,
"Jungle King": 0x4F4D32,
"Jungle Mist": 0xB0C4C4,
"Jungle Moss": 0xBDC3AC,
"Jungle Noises": 0x36716F,
"Jungle Trail": 0x6D6F42,
"Juniper": 0x74918E,
"Juniper Ash": 0x798884,
"<NAME>": 0x547174,
"<NAME>": 0xB9B3C2,
"J<NAME> Blue": 0x3F626E,
"Juniper Breeze": 0xD9E0D8,
"Juniper Green": 0x567F69,
"Juniper Oil": 0x6B8B75,
"Junket": 0xFBECD3,
"Junkrat": 0x998778,
"Jupiter": 0xE1E1E2,
"Jupiter Brown": 0xAC8181,
"Jurassic Gold": 0xE7AA56,
"Jurassic Park": 0x3C663E,
"Just a Fairytale": 0x6C5D97,
"Just a Little": 0xDBE0D6,
"Just A Tease": 0xFBD6D2,
"Just About Green": 0xE2E7D3,
"Just About White": 0xE8E8E0,
"Just Blush": 0xFAB4A4,
"Just Gorgeous": 0xD6C4C1,
"Just Peachy": 0xF8C275,
"Just Perfect": 0xEAECD3,
"Just Pink Enough": 0xFFEBEE,
"Just Right": 0xDCBFAC,
"Just Rosey": 0xC4A295,
"Justice": 0x606B8E,
"Jute": 0xAD9773,
"Jute Brown": 0x815D40,
"Juzcar Blue": 0xA1D5F1,
"<NAME>": 0x736354,
"<NAME>": 0xB14A30,
"Kabalite Green": 0x038C67,
"Kabocha Green": 0x044A05,
"Kabul": 0x6C5E53,
"Kacey's Pink": 0xE94B7E,
"Kachi Indigo": 0x393E4F,
"Kaffee": 0x816D5A,
"Kaffir Lime": 0xB9AB85,
"Kahili": 0xB7BFB0,
"Kahlua Milk": 0xBAB099,
"Kahu Blue": 0x0093D6,
"Kaitoke Green": 0x245336,
"Kakadu Trail": 0x7D806E,
"Kākāriki Green": 0x298256,
"Kakitsubata Blue": 0x3E62AD,
"Kālā Black": 0x201819,
"Kala Namak": 0x46444C,
"Kalahari Sunset": 0x9F5440,
"Kalamata": 0x5F5B4C,
"Kale": 0x5A7247,
"Kale Green": 0x4F6A56,
"Kaleidoscope": 0x8DA8BE,
"Kali Blue": 0x00505A,
"Kalish Violet": 0x552288,
"Kalliene Yellow": 0xB59808,
"Kaltes Klares Wasser": 0x0FFEF9,
"Kamenozoki Grey": 0xC6C2B6,
"Kamut": 0xCCA483,
"Kanafeh": 0xDD8833,
"Kandinsky Turquoise": 0x2D8284,
"Kangaroo": 0xC5C3B0,
"Kangaroo Fur": 0xC4AD92,
"Kangaroo Paw": 0xDECAC5,
"Kangaroo Pouch": 0xBDA289,
"Kangaroo Tan": 0xE4D7CE,
"Kansas Grain": 0xFEE7CB,
"Kantor Blue": 0x001146,
"Kanzō Orange": 0xFF8936,
"Kaolin": 0xAD7D40,
"Kappa Green": 0xC5DED1,
"Kara Cha Brown": 0x783C1D,
"Karacha Red": 0xB35C44,
"Karak Stone": 0xBB9662,
"Karaka": 0x2D2D24,
"Karaka Orange": 0xF04925,
"Karakurenai Red": 0xC91F37,
"Kariyasu Green": 0x6E7955,
"Karma": 0xB2A484,
"Karma Chameleon": 0x9F78A9,
"Karry": 0xFEDCC1,
"Kashmir": 0x6F8D6A,
"Kashmir Blue": 0x576D8E,
"Kashmir Pink": 0xE9C8C3,
"Kasugai Peach": 0xF3DFD5,
"Kathleen's Garden": 0x8FA099,
"Kathmandu": 0xAD9A5D,
"Katsura": 0xC9E3CC,
"Katy Berry": 0xAA0077,
"Katydid": 0x66BC91,
"Kauai": 0x5AC7AC,
"Kawaii": 0xEAABBC,
"Kazakhstan Yellow": 0xFEC50C,
"Keel Joy": 0xD49595,
"Keemun": 0xA49463,
"Keen Green": 0x226600,
"Keepsake": 0xC0CED6,
"Keepsake Lilac": 0xC0A5AE,
"Keepsake Rose": 0xB08693,
"Keese Blue": 0x0000BC,
"Kefir": 0xD5D5CE,
"Kelley Green": 0x02AB2E,
"<NAME>": 0xDEC7CF,
"Kelly Green": 0x339C5E,
"Kelly's Flower": 0xBABD6C,
"Kelp": 0x4D503C,
"<NAME>": 0x716246,
"Kelp Forest": 0x448811,
"Kelp'thar Forest Blue": 0x0092AE,
"<NAME>": 0x437B48,
"Ken Masters Red": 0xEC2C25,
"Kendal Green": 0x547867,
"Kendall Rose": 0xF7CCCD,
"Kenny's Kiss": 0xD45871,
"Kenpō Brown": 0x543F32,
"Kenpōzome Black": 0x2E211B,
"Kentucky": 0x6395BF,
"Kentucky Blue": 0xA5B3CC,
"Kentucky Bluegrass": 0x22AABB,
"Kenya": 0xCCA179,
"Kenyan Copper": 0x6C322E,
"Kenyan Sand": 0xBB8800,
"Keppel": 0x5FB69C,
"Kermit Green": 0x5CB200,
"Kernel": 0xECB976,
"Kerr's Pink Potato": 0xB57281,
"Keshizumi Cinder": 0x524E4D,
"Kestrel White": 0xE0D6C8,
"Ketchup": 0x9A382D,
"Kettle Black": 0x131313,
"Kettle Corn": 0xF6E2BD,
"Kettle Drum": 0x9BCB96,
"Kettleman": 0x606061,
"Key Keeper": 0xECD1A5,
"Key Largo": 0x7FB6A4,
"Key Lime": 0xAEFF6E,
"Key Lime Pie": 0xBFC921,
"Key Lime Water": 0xE8F48C,
"Key to the City": 0xBB9B7C,
"Key West Zenith": 0x759FC1,
"Keystone": 0xB39372,
"Keystone Gray": 0x9E9284,
"Keystone Grey": 0xB6BBB2,
"Khaki": 0xC3B091,
"Khaki Brown": 0x954E2A,
"Khaki Core": 0xFBE4AF,
"Khaki Green": 0x728639,
"Khaki Shade": 0xD4C5AC,
"Khardic Flesh": 0xB16840,
"Khemri Brown": 0x76664C,
"Khmer Curry": 0xEE5555,
"Khorne Red": 0x6A0001,
"Kickstart Purple": 0x7777CC,
"Kid Gloves": 0xB6AEAE,
"Kid Icarus": 0xA81000,
"Kid's Stuff": 0xED8732,
"Kidnapper": 0xBFC0AB,
"Kihada Yellow": 0xFEF263,
"Kikorangi Blue": 0x2E4EBF,
"Kikuchiba Gold": 0xE29C45,
"Kikyō Purple": 0x5D3F6A,
"Kilauea Lava": 0x843D38,
"Kilim Beige": 0xD7C5AE,
"Kilimanjaro": 0x3A3532,
"Kilkenny": 0x498555,
"Killarney": 0x49764F,
"Killer Fog": 0xC9D2D1,
"<NAME>": 0xA89887,
"Kimberley Sea": 0x386B7D,
"Kimberley Tree": 0xB8C1B1,
"Kimberlite": 0x696FA5,
"Kimberly": 0x695D87,
"Kimchi": 0xED4B00,
"Kimirucha Brown": 0x896C39,
"Kimono": 0x6D86B6,
"Kimono Grey": 0x3D4C51,
"Kimono Violet": 0x75769B,
"Kin Gold": 0xF39800,
"Kincha Brown": 0xC66B27,
"Kind Green": 0xAAC2B3,
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import os
import numpy as np
from numpy import pi,cos,sin
import pandas as pd
import logging
from plotnine import *
from scipy.stats.mstats import winsorize
from plotnine.stats.stat_summary import bootstrap_statistics
#%% put PUPIL LABS data into PANDAS DF
def gaze_to_pandas(gaze):
    """Convert a list of Pupil Labs gaze dictionaries into a pandas DataFrame.

    Returns a DataFrame with columns gx, gy, confidence, smpl_time,
    diameter (mean pupil diameter over all base_data entries) and
    pa (pupil area computed from the ellipse axes).

    Falsy entries in `gaze` are skipped both here and in the list
    comprehensions below, keeping all columns aligned.
    """
    import pandas as pd
    list_diam= []
    list_pa= []
    for idx,p in enumerate(gaze):
        if p:
            # NOTE(review): only the FIRST datum's topic is inspected; this
            # assumes every entry of `gaze` shares the same topic — confirm.
            if 'surface' in gaze[0]['topic']:
                # surface-mapped dictionary: unwrap to reach the real pupil data;
                # nesting is surface-mapped => base_data (world) => base_data (pupil)
                p_basedata = p['base_data']['base_data']
            else:
                p_basedata = p['base_data']
            # take the mean over all pupil-diameters
            diam = 0
            pa = 0
            for idx_bd,bd in enumerate(p_basedata):
                # NOTE(review): `pa` is overwritten each iteration, so only the
                # LAST base_data's area is kept, while `diam` is averaged over
                # all entries — confirm this asymmetry is intended.
                pa = convert_diam_to_pa(bd['ellipse']['axes'][0], bd['ellipse']['axes'][1])
                diam = diam + bd['diameter']
            diam = diam/(idx_bd+1)
            list_diam.append(diam)
            list_pa.append(pa)
    df = pd.DataFrame({'gx':[p['norm_pos'][0] for p in gaze if p],
                       'gy':[p['norm_pos'][1] for p in gaze if p],
                       'confidence': [p['confidence'] for p in gaze if p],
                       'smpl_time':[p['timestamp'] for p in gaze if p],
                       'diameter':list_diam,
                       'pa': list_pa
                       })
    return df
def convert_diam_to_pa(axes1, axes2):
    """Return the ellipse area (pi * a * b / 4) for the two axis lengths."""
    axis_product = float(axes1) * float(axes2)
    return math.pi * axis_product * 0.25
#%% adding information to dfs
def add_msg_to_event(etevents, etmsgs, timefield='start_time', direction='backward'):
    """Attach to each event the message closest in time.

    Performs an as-of merge: each event row receives the columns of the
    message whose `msg_time` is nearest to the event's `timefield` in the
    given `direction` ('backward' = last message at or before the event).

    Parameters
    ----------
    etevents : DataFrame
        must contain the `timefield` column
    etmsgs : DataFrame
        must contain a 'msg_time' column
    timefield : str
        event-time column to sort and merge on.
        BUG FIX: this parameter used to be ignored ('start_time' was
        hard-coded); the default preserves the old behaviour.
    direction : str
        passed through to pd.merge_asof

    Returns
    -------
    DataFrame of events (sorted by `timefield`) with message columns appended.
    """
    # merge_asof requires both merge keys to be sorted
    etevents = etevents.sort_values(timefield)
    etmsgs = etmsgs.sort_values('msg_time')
    merged_etevents = pd.merge_asof(etevents, etmsgs, left_on=timefield,
                                    right_on='msg_time', direction=direction)
    return merged_etevents
def add_events_to_samples(etsamples, etevents):
    """Annotate each sample with the type of the event it falls into.

    Calls append_eventtype_to_sample once per event type present in
    `etevents`; for blinks an additional 'blink_id' column is created that
    numbers the blink periods consecutively (0 for samples outside blinks).
    """
    logger = logging.getLogger(__name__)
    logger.info(etevents.type.unique())
    for evt in etevents.type.unique():
        etsamples = append_eventtype_to_sample(etsamples,etevents,eventtype=evt)
        # add blink id
        if evt == 'blink':
            # counts up the blink_id:
            # (1*(type=='blink')) is a 0/1 indicator per sample; its
            # .diff()==1 marks the rising edge (first sample) of each blink
            # run; cumsum() numbers those runs consecutively; multiplying by
            # the indicator zeroes out samples outside any blink.
            etsamples.loc[:,'blink_id'] = (1*(etsamples['type']=='blink')) * ((1*(etsamples['type']=='blink')).diff()==1).cumsum()
    return(etsamples)
def append_eventtype_to_sample(etsamples,etevents,eventtype,timemargin=None):
    """Label all samples that fall within events of the given type.

    Sets etsamples['type'] = eventtype for every sample whose smpl_time lies
    in [start_time + timemargin[0], end_time + timemargin[1]] of such an
    event. Modifies `etsamples` in place and also returns it.

    Parameters
    ----------
    etsamples : DataFrame
        must have a 'smpl_time' column; assumed sorted ascending
        (np.searchsorted in eventtime_to_sampletime relies on it)
    etevents : DataFrame
        must have 'type', 'start_time' and 'end_time' columns
    eventtype : str
        e.g. 'blink', 'fixation', 'saccade'
    timemargin : [start_offset, end_offset] in seconds
        defaults to [-0.1, 0.1] for blinks (conservative padding around
        them) and [0, 0] for everything else
    """
    # get a logger
    logger = logging.getLogger(__name__)
    logger.debug('Appending eventtype: %s to samples',eventtype)
    if timemargin is None:
        if eventtype== 'blink':
            logger.info('Taking Default value for timemargin (blink = -0.1s/0.1s)')
            timemargin = [-.1,.1]
        else:
            logger.info('Taking Default value for timemargin (fix/saccade = 0s)')
            timemargin = [0,0]
    # get index of the rows that have that eventtype
    ix_event = etevents['type']==eventtype
    # padded start and end times of all matching events
    eventstart = etevents.loc[ix_event,'start_time']+float(timemargin[0])
    eventend = etevents.loc[ix_event,'end_time']+float(timemargin[1])
    # positional sample indices covered by any of those events
    flat_ranges = eventtime_to_sampletime(etsamples,eventstart,eventend)
    # all etsamples with ix in ranges get the eventtype in the column 'type'
    if len(flat_ranges) > 0:
        etsamples.loc[etsamples.index[flat_ranges], 'type'] = eventtype
    return etsamples
def eventtime_to_sampletime(etsamples, eventstart, eventend):
    """Map event start/end times to the positional sample indices they cover.

    Parameters
    ----------
    etsamples : DataFrame
        must have a 'smpl_time' column sorted ascending (required by
        np.searchsorted)
    eventstart, eventend : Series of equal length
        event boundaries; values are clipped (in place) to the sample time
        range, since time margins can push them outside it

    Returns
    -------
    np.ndarray of unique positional indices into `etsamples`.

    Raises
    ------
    ValueError
        if `eventstart` and `eventend` differ in length.
    """
    # due to timemargin strange effects can occur and we need to clip
    mintime = etsamples.smpl_time.iloc[0]
    maxtime = etsamples.smpl_time.iloc[-1]
    eventstart.loc[eventstart < mintime] = mintime
    eventstart.loc[eventstart > maxtime] = maxtime
    eventend.loc[eventend < mintime] = mintime
    eventend.loc[eventend > maxtime] = maxtime
    if len(eventstart) != len(eventend):
        # BUG FIX: was `raise error`, which is a NameError (`error` undefined)
        raise ValueError('eventstart and eventend must have the same length '
                         '(%d != %d)' % (len(eventstart), len(eventend)))
    startix = np.searchsorted(etsamples.smpl_time, eventstart)
    endix = np.searchsorted(etsamples.smpl_time, eventend)
    # make a list of ranges to have all indices in between the startix and endix
    ranges = [list(range(s, e)) for s, e in zip(startix, endix)]
    flat_ranges = [item for sublist in ranges for item in sublist]
    # deduplicate and restrict to valid positional indices
    flat_ranges = np.intersect1d(flat_ranges, range(etsamples.shape[0]))
    return flat_ranges
#%% last fixation (e.g. for large GRID)
def only_last_fix(merged_etevents, next_stim=['condition', 'block', 'element']):
    """Reduce fixation events to the final fixation per stimulus.

    Groups all rows of type 'fixation' by the stimulus-identifying columns
    in `next_stim` and keeps only the last row of each group (e.g. the last
    fixation on each element of the large GRID).

    NOTE: smooth-pursuit events are currently *not* relabelled as fixations;
    an earlier attempt to do so was removed because it raised an error.
    """
    fixations_only = merged_etevents[merged_etevents.type == 'fixation']
    last_fix_df = fixations_only.groupby(next_stim).last()
    last_fix_df.reset_index(level=next_stim, inplace=True)
    return last_fix_df
#%% function to make groupby easier
def group_to_level_and_take_mean(raw_condition_df, lowestlevel):
    """Group the measures down to `lowestlevel` and average them.

    The accuracy/precision measures are averaged over everything below the
    requested level, e.g. 'subject' averages over all blocks of a subject.

    Parameters
    ----------
    raw_condition_df : DataFrame with the grouping columns listed below
    lowestlevel : one of 'subject', 'block', 'element_positions', 'condition'

    Raises
    ------
    ValueError for any other level.
    """
    level_keys = {
        'subject': ['et', 'subject'],
        'block': ['et', 'subject', 'block'],
        'element_positions': ['et', 'subject', 'block', 'posx', 'posy'],
        'condition': ['et', 'subject', 'condition'],
    }
    if lowestlevel not in level_keys:
        raise ValueError('This level is unknown / not implemented')
    keys = level_keys[lowestlevel]
    # mean over all rows of each group, then move the keys back into columns
    grouped_df = raw_condition_df.groupby(keys).mean().reset_index(level=keys)
    return grouped_df
#%% set dtypes of dataframe and make the labes ready to get plotted
def set_dtypes(df):
    """
    Set the dtype of the categories, so that plotting is easier and more pretty.
    E.g. set column 'et' from object to categorical.

    Additionally normalizes the id-like columns block/trial/pic_id: values
    are cast to integer (so e.g. 1.0 renders as '1'), converted to strings,
    and missing values are restored as NaN.
    """
    # make all object variables categorical
    df[df.select_dtypes(['object']).columns] = df.select_dtypes(['object']).apply(lambda x: x.astype('category'))
    # list of categorical variables that have to be treated separately as they were not object dtypes
    categorial_var = ["block", "trial", "pic_id"]
    # set columns to correct dtype
    for column in categorial_var:
        if column in df:
            # fill none values to not have problems with integers;
            # -1 is used as a sentinel and converted back to NaN below
            df[column] = df[column].fillna(-1)
            # convert ids to integer and round them to make them look nicely
            df[column] = pd.to_numeric(df[column], downcast='integer')
            df[column] = df[column].round(0).astype(int)
            # represent ids as strings, then convert the -1 sentinel back to NaN
            df[column] = df[column].astype(str)
            df[column] = df[column].replace('-1', np.nan)
            # old version
            #df[column] = df[column].astype('category')
    # logging.debug('dtypes of the df after: %s', df.dtypes)
    return df
def set_to_full_names(df):
    """
    Rename abbreviated values to their full names,
    e.g. et 'el' --> 'EyeLink'.

    Note: values not present in the mapping become NaN (Series.map semantics).
    Column names are intentionally left untouched (relabel axes when plotting
    instead).
    """
    full_names = {'el': 'EyeLink', 'pl': 'Pupil Labs'}
    df.loc[:, 'et'] = df['et'].map(full_names)
    return df
#%% everything related to VISUAL DEGREES
def size_px2deg(px, mm_per_px=0.276, distance=600):
    """Convert a size on the monitor from pixels to visual angle (degrees).

    Uses the full-angle formula 2*atan2(size/2 * mm_per_px, distance), i.e.
    the angle subtended by the whole extent; `distance` is in the same unit
    that `mm_per_px` converts to (mm).
    """
    half_size_mm = px / 2 * mm_per_px
    deg = 2 * np.arctan2(half_size_mm, distance) * 180 / np.pi
    return deg
def px2deg(px, orientation, mm_per_px=0.276, distance=600):
    """Convert a screen position from pixels to visual degrees.

    The pixel coordinate is first re-centered on the middle of the
    1920x1080 BENQ monitor ("gx_px - gx_px-midpoint"), then converted using
    atan2(offset_mm, distance).

    Parameters
    ----------
    px : pixel coordinate (x for 'horizontal', y for 'vertical')
    orientation : 'horizontal' or 'vertical'
    mm_per_px : monitor pixel pitch in mm
    distance : viewing distance (mm)

    Raises
    ------
    ValueError for an unknown orientation.
    """
    if orientation == 'horizontal':
        center_x = 1920 / 2
        px = px - center_x
    elif orientation == 'vertical':
        center_y = 1080 / 2
        px = px - center_y
    else:
        # BUG FIX: was `raise('unknown option')`, which raises a TypeError
        # ("exceptions must derive from BaseException") instead of a
        # meaningful error
        raise ValueError('unknown option')
    deg = np.arctan2(px * mm_per_px, distance) * 180 / np.pi
    return deg
def sph2cart(theta_sph, phi_sph, rho_sph=1):
    """Convert spherical coordinates to Cartesian xyz.

    theta_sph is the polar angle measured from +z (z = rho*cos(theta)),
    phi_sph the azimuth in the x-y plane, rho_sph the radius.
    Returns a numpy array [x, y, z].
    """
    sin_theta = sin(theta_sph)
    return np.asarray([rho_sph * sin_theta * cos(phi_sph),
                       rho_sph * sin_theta * sin(phi_sph),
                       rho_sph * cos(theta_sph)])
#%% LOAD & SAVE & FIND file
def load_file(et, subject, datapath='/net/store/nbp/projects/etcomp/', outputprefix='', cleaned=True):
    """Load the preprocessed samples, messages and events CSVs of a subject.

    Parameters
    ----------
    et : str
        eye-tracker tag, e.g. 'el' or 'pl'
    subject : str
        subject folder name below `datapath`
    datapath : str
        root folder; files are read from <datapath>/<subject>/preprocessed
    outputprefix : str
        prepended to `et` when building the filenames
    cleaned : bool
        load '<et>_cleaned_samples.csv' instead of '<et>_samples.csv'

    Returns
    -------
    (etsamples, etmsgs, etevents) : three DataFrames

    Raises
    ------
    FileNotFoundError if any of the three CSV files is missing.
    """
    # filepath for preprocessed folder
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    et = outputprefix + et
    try:
        if cleaned:
            filename_samples = str(et) + '_cleaned_samples.csv'
        else:
            filename_samples = str(et) + '_samples.csv'
        filename_msgs = str(et) + '_msgs.csv'
        filename_events = str(et) + '_events.csv'
        etsamples = pd.read_csv(os.path.join(preprocessed_path, filename_samples))
        etmsgs = pd.read_csv(os.path.join(preprocessed_path, filename_msgs))
        etevents = pd.read_csv(os.path.join(preprocessed_path, filename_events))
    except FileNotFoundError as e:
        print(e)
        # IMPROVEMENT: bare `raise` re-raises with the original traceback
        # intact (was `raise e`, which resets the raise location)
        raise
    return etsamples, etmsgs, etevents
def save_file(data, et, subject, datapath, outputprefix=''):
    """Write the four per-subject DataFrames to CSV files.

    `data` must be a sequence of four DataFrames in the order
    (samples, cleaned samples, msgs, events); they are written (without the
    index) to <datapath>/<subject>/preprocessed/<outputprefix><et>_<kind>.csv,
    creating the folder if needed.
    """
    # filepath for preprocessed folder
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    # create new folder if there is none
    if not os.path.exists(preprocessed_path):
        os.makedirs(preprocessed_path)
    et = outputprefix + et
    # one CSV per DataFrame, in this fixed order
    suffixes = ('_samples.csv', '_cleaned_samples.csv', '_msgs.csv', '_events.csv')
    for position, suffix in enumerate(suffixes):
        filename = str(et) + suffix
        data[position].to_csv(os.path.join(preprocessed_path, filename), index=False)
def findFile(path, ftype):
    """Return all filenames in `path` that end with `ftype`.

    Used e.g. to locate the EyeLink EDF recording in a subject folder.
    """
    return [name for name in os.listdir(path) if name.endswith(ftype)]
def get_subjectnames(datapath='/net/store/nbp/projects/etcomp/'):
    """Return all entries of `datapath` (assumed to be one folder per subject).

    NOTE(review): os.listdir does not filter, so any stray file in
    `datapath` is returned as well — confirm the folder only contains
    subject directories.
    """
    return os.listdir(datapath)
#%% Tic Toc Matlab equivalent to time things
import time
def TicTocGenerator():
    """Infinite generator yielding the wall-clock seconds since the last next().

    Matlab tic/toc equivalent: every next() call reports the time elapsed
    since the previous next() (the first call reports the time between two
    immediately successive time.time() reads).
    """
    previous = 0  # placeholder; overwritten before the first yield
    current = time.time()
    while True:
        previous = current
        current = time.time()
        yield current - previous  # seconds elapsed since the last call
TicToc = TicTocGenerator()  # shared timer instance used by the tic()/toc() helpers below
# This will be the main function through which we define both tic() and toc()
def | |
<filename>test/test_proteins.py<gh_stars>0
from unittest import TestCase
from six import assertRaisesRegex
from six.moves import builtins
from contextlib import contextmanager
from dark.proteins import (
splitNames, _NO_PATHOGEN_NAME, getPathogenProteinCounts, ProteinGrouper,
PathogenSampleFiles)
from dark.utils import StringIO
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class TestSplitNames(TestCase):
    """
    Tests for the splitNames function.
    """
    def testNoBrackets(self):
        """
        If a string with no trailing pathogen name in square brackets is given,
        splitNames must return its argument followed by a string indicating
        that no pathogen name could be found.
        """
        self.assertEqual(('xxx', _NO_PATHOGEN_NAME), splitNames('xxx'))

    def testTwoSetsOfBrackets(self):
        """
        If a string with two trailing substrings in square brackets is given,
        splitNames must extract the substring from the second (final) set of
        square brackets and use that as the pathogen name.
        """
        self.assertEqual(('xxx [other]', 'pathogen'),
                         splitNames('xxx [other] [pathogen]'))

    def testWhitespaceStripping(self):
        """
        If a string with names that have surrounding whitespace is passed,
        splitNames must strip the whitespace in its result.
        """
        self.assertEqual(('xxx', 'pathogen'),
                         splitNames(' xxx [ pathogen ]'))

    def testNestedBrackets(self):
        """
        If a string with two nested trailing substrings in square brackets is
        given, splitNames must return its argument followed by a string
        indicating that no pathogen name could be found.
        """
        self.assertEqual(('xxx [nested [pathogen name]]', _NO_PATHOGEN_NAME),
                         splitNames('xxx [nested [pathogen name]]'))

    def testNormalCase(self):
        """
        If a string with a protein and pathogen name is passed, splitNames
        must return the expected result.
        """
        self.assertEqual(('protein name', 'pathogen name'),
                         splitNames('protein name [pathogen name]'))
class TestGetPathogenProteinCounts(TestCase):
    """
    Tests for the getPathogenProteinCounts function.
    """
    def testNone(self):
        """
        getPathogenProteinCounts must return an empty result if passed None as
        the protein FASTA file.
        """
        self.assertEqual({}, getPathogenProteinCounts(None))

    def testExpected(self):
        """
        getPathogenProteinCounts must return the expected result.
        """
        class SideEffect(object):
            # Stand-in for builtins.open: serves one FASTA payload on the
            # first call and fails the test on any further call.
            def __init__(self, test):
                self.test = test
                self.count = 0

            def sideEffect(self, filename, **kwargs):
                if self.count == 0:
                    self.test.assertEqual('filename.fasta', filename)
                    self.count += 1
                    # Two proteins for pathogen 1, one without a pathogen
                    # name (must be ignored), one for pathogen 2.
                    return StringIO('>protein 1 [pathogen 1]\n' +
                                    'ACTG\n' +
                                    '>protein 2 [pathogen 1]\n' +
                                    'AA\n' +
                                    '>no pathogen name here\n' +
                                    'AA\n' +
                                    '>protein 3 [pathogen 2]\n' +
                                    'AA\n')
                else:
                    self.test.fail('We are only supposed to be called once!')

        sideEffect = SideEffect(self)
        with patch.object(builtins, 'open') as mockMethod:
            mockMethod.side_effect = sideEffect.sideEffect
            self.assertEqual(
                {
                    'pathogen 1': 2,
                    'pathogen 2': 1,
                },
                getPathogenProteinCounts(['filename.fasta']))
            self.assertEqual(1, sideEffect.count)

    def testExpectedWithTwoFiles(self):
        """
        getPathogenProteinCounts must return the expected result when details
        are read from two FASTA files.
        """
        class SideEffect(object):
            # Stand-in for builtins.open: serves a different FASTA payload
            # for each expected filename (in call order) and fails the test
            # on any unexpected extra call.
            def __init__(self, test):
                self.test = test
                self.count = 0

            def sideEffect(self, filename, **kwargs):
                if self.count == 0:
                    self.test.assertEqual('filename1.fasta', filename)
                    self.count += 1
                    return StringIO('>protein 1 [pathogen 1]\n' +
                                    'ACTG\n' +
                                    '>protein 3 [pathogen 2]\n' +
                                    'AA\n')
                elif self.count == 1:
                    self.test.assertEqual('filename2.fasta', filename)
                    self.count += 1
                    return StringIO('>protein 2 [pathogen 1]\n' +
                                    'AA\n')
                else:
                    self.test.fail('We are only supposed to be called twice!')

        sideEffect = SideEffect(self)
        with patch.object(builtins, 'open') as mockMethod:
            mockMethod.side_effect = sideEffect.sideEffect
            self.assertEqual(
                {
                    'pathogen 1': 2,
                    'pathogen 2': 1,
                },
                getPathogenProteinCounts(
                    ['filename1.fasta', 'filename2.fasta']))
            self.assertEqual(2, sideEffect.count)
class TestProteinGrouper(TestCase):
"""
Tests for the dark.proteins.ProteinGrouper class.
"""
def testUnknownFormat(self):
    """
    Passing an unknown format_ argument must result in a ValueError
    being raised.
    """
    error = "^format_ must be either 'fasta' or 'fastq'\\.$"
    assertRaisesRegex(self, ValueError, error, ProteinGrouper,
                      format_='unknown')
def testNoAssetDir(self):
    """
    If no asset directory is given to a protein grouper, its _assetDir
    attribute must be the default ('out').
    """
    pg = ProteinGrouper()
    self.assertEqual('out', pg._assetDir)
def testAssetDir(self):
    """
    If an asset directory is given to a protein grouper, its _assetDir
    attribute must be set to hold that value.
    """
    pg = ProteinGrouper(assetDir='xxx')
    self.assertEqual('xxx', pg._assetDir)
def testNoSampleName(self):
    """
    If no sample name is given to a protein grouper, its _sampleName
    attribute must be None.
    """
    pg = ProteinGrouper()
    self.assertEqual(None, pg._sampleName)
def testNoRegex(self):
    """
    If no regex is given to a protein grouper, its _sampleNameRegex
    attribute must be None.
    """
    pg = ProteinGrouper()
    self.assertEqual(None, pg._sampleNameRegex)
def testNoFiles(self):
    """
    If no files have been given to a protein grouper, its sample names and
    pathogen names attributes must both be empty.
    """
    pg = ProteinGrouper()
    self.assertEqual({}, pg.pathogenNames)
    self.assertEqual({}, pg.sampleNames)
def testUnknownPathogenType(self):
    """
    If the toHTML method of a protein grouper is given an unknown pathogen
    type it must raise a ValueError.
    """
    pg = ProteinGrouper()
    error = ("^Unrecognized pathogenType argument: 'x'\\. Value must be "
             "either 'bacterial' or 'viral'\\.$")
    assertRaisesRegex(self, ValueError, error, pg.toHTML, pathogenType='x')
def testDuplicatePathogenProteinSample(self):
    """
    If a protein grouper is given duplicate information for a
    pathogen/protein/sample combination it must raise a ValueError.
    """
    fp = StringIO('0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
                  'ubiquitin [Lausannevirus]\n')
    pg = ProteinGrouper()
    pg.addFile('sample', fp)
    # rewind so the identical line is fed to addFile a second time
    fp.seek(0)
    # NOTE(review): the final '|' before 'ubiquitin' is not escaped, so the
    # regex contains an (unintended?) alternation — it still matches here,
    # but the pattern is laxer than it looks; confirm.
    error = ("^Protein 'acc\\|GENBANK\\|I44.6\\|GENBANK\\|J77|"
             "ubiquitin' already seen for pathogen 'Lausannevirus' "
             "sample 'sample'\\.$")
    assertRaisesRegex(self, ValueError, error, pg.addFile, 'sample', fp)
def testOneLineInOneFile(self):
"""
If a protein grouper is given one file with one line, its pathogenNames
dict must be as expected.
"""
fp = StringIO('0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [Lausannevirus]\n')
pg = ProteinGrouper()
pg.addFile('sample-filename', fp)
self.assertEqual(
{
'Lausannevirus': {
'sample-filename': {
'proteins': {
'acc|GENBANK|I44.6|GENBANK|J77|ubiquitin': {
'bestScore': 48.1,
'bluePlotFilename': 'out/0.png',
'coverage': 0.77,
'readsFilename': 'out/0.fasta',
'hspCount': 6,
'index': 0,
'medianScore': 46.6,
'outDir': 'out',
'proteinLength': 74,
'proteinName': ('acc|GENBANK|I44.6|GENBANK|'
'J77|ubiquitin'),
'proteinURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/'
'I44.6'),
'genomeURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/J77'),
'readCount': 5,
'readAndHspCountStr': '5/6',
},
},
'uniqueReadCount': None,
},
}
},
pg.pathogenNames)
def testOneLineInOneFileWithDifferentAssetDir(self):
"""
If a protein grouper is given a different assetDir name,
the outDir needs to have that same name, as expected.
"""
fp = StringIO('0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [Lausannevirus]\n')
pg = ProteinGrouper(assetDir='differentname')
pg.addFile('sample-filename', fp)
self.assertEqual(
{
'Lausannevirus': {
'sample-filename': {
'proteins': {
'acc|GENBANK|I44.6|GENBANK|J77|ubiquitin': {
'bestScore': 48.1,
'bluePlotFilename': 'differentname/0.png',
'coverage': 0.77,
'readsFilename': 'differentname/0.fasta',
'hspCount': 6,
'index': 0,
'medianScore': 46.6,
'outDir': 'differentname',
'proteinLength': 74,
'proteinName': ('acc|GENBANK|I44.6|GENBANK|'
'J77|ubiquitin'),
'proteinURL': ('http://www.ncbi.nlm.nih.gov/'
'nuccore/I44.6'),
'genomeURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/J77'),
'readCount': 5,
'readAndHspCountStr': '5/6',
},
},
'uniqueReadCount': None,
},
}
},
pg.pathogenNames)
def testOneLineInOneFileFASTQ(self):
"""
If a protein grouper is given one file with one line, its pathogenNames
dict must be as expected, including for a FASTQ file.
"""
fp = StringIO('0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [Lausannevirus]\n')
pg = ProteinGrouper(format_='fastq')
pg.addFile('sample-filename', fp)
self.assertEqual(
{
'Lausannevirus': {
'sample-filename': {
'proteins': {
'acc|GENBANK|I44.6|GENBANK|J77|ubiquitin': {
'bestScore': 48.1,
'bluePlotFilename': 'out/0.png',
'coverage': 0.77,
'readsFilename': 'out/0.fastq',
'hspCount': 6,
'index': 0,
'medianScore': 46.6,
'outDir': 'out',
'proteinLength': 74,
'proteinName': ('acc|GENBANK|I44.6|GENBANK|'
'J77|ubiquitin'),
'proteinURL': ('http://www.ncbi.nlm.nih.gov/'
'nuccore/I44.6'),
'genomeURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/J77'),
'readCount': 5,
'readAndHspCountStr': '5/6',
},
},
'uniqueReadCount': None,
},
}
},
pg.pathogenNames)
def testOneLineInOneFileTitle(self):
"""
If a protein grouper is given one file with one line, its _title method
must return the expected string.
"""
fp = StringIO('0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [Lausannevirus]\n')
pg = ProteinGrouper()
pg.addFile('sample-filename', fp)
self.assertEqual(
'Overall, proteins from 1 pathogen were found in 1 sample.',
pg._title())
def testTwoLinesInOneFileTitle(self):
"""
If a protein grouper is given one file with two protein lines, each
from a different pathogen, its _title method must return the expected
string.
"""
fp = StringIO(
'0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [Lausannevirus]\n'
'0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.6|GENBANK|J77|'
'ubiquitin [X Virus]\n'
)
pg = ProteinGrouper()
pg.addFile('sample-filename', fp)
self.assertEqual(
'Overall, proteins from 2 pathogens were found in 1 sample.',
pg._title())
def testTwoLinesInOneFileSamePathogen(self):
"""
If a protein grouper is given one file with two lines from the same
pathogen, its pathogenNames dict must be as expected.
"""
fp = StringIO(
'0.63 41.3 44.2 9 9 12 acc|GENBANK|I44.6|GENBANK|J77|VP1 '
'[Lausannevirus]\n'
'0.77 46.6 48.1 5 6 74 acc|GENBANK|I44.7|GENBANK|J78|VP2 '
'[Lausannevirus]\n'
)
pg = ProteinGrouper()
pg.addFile('sample-filename', fp)
self.assertEqual(
{
'Lausannevirus': {
'sample-filename': {
'proteins': {
'acc|GENBANK|I44.6|GENBANK|J77|VP1': {
'bestScore': 44.2,
'bluePlotFilename': 'out/0.png',
'coverage': 0.63,
'readsFilename': 'out/0.fasta',
'hspCount': 9,
'index': 0,
'medianScore': 41.3,
'outDir': 'out',
'proteinLength': 12,
'proteinName': ('acc|GENBANK|I44.6|GENBANK|'
'J77|VP1'),
'proteinURL': ('http://www.ncbi.nlm.nih.gov/'
'nuccore/I44.6'),
'genomeURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/J77'),
'readCount': 9,
'readAndHspCountStr': '9',
},
'acc|GENBANK|I44.7|GENBANK|J78|VP2': {
'bestScore': 48.1,
'bluePlotFilename': 'out/1.png',
'coverage': 0.77,
'readsFilename': 'out/1.fasta',
'hspCount': 6,
'index': 1,
'medianScore': 46.6,
'outDir': 'out',
'proteinLength': 74,
'proteinName': ('acc|GENBANK|I44.7|GENBANK|'
'J78|VP2'),
'proteinURL': ('http://www.ncbi.nlm.nih.gov/'
'nuccore/I44.7'),
'genomeURL': (
'http://www.ncbi.nlm.nih.gov/nuccore/J78'),
'readCount': 5,
'readAndHspCountStr': '5/6',
},
},
'uniqueReadCount': None,
},
},
},
pg.pathogenNames)
def testTwoLinesInOneFileDifferentPathogens(self):
"""
If a protein grouper is given one file with two lines from different
pathogens, its pathogenNames dict must be as expected.
"""
fp = StringIO(
'0.63 41.3 44.2 9 9 12 acc|GENBANK|I44.6|GENBANK|J77|VP1 | |
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import random
import typing
import deprecation
import mcpython.common.config
import mcpython.common.data.DataPacks
import mcpython.common.entity.PlayerEntity
import mcpython.common.state.GameViewStatePart
import mcpython.common.world.Chunk
import mcpython.common.world.Dimension
import mcpython.common.world.GameRule
import mcpython.common.world.OffProcessWorldAccess
import mcpython.common.world.SaveFile
import mcpython.engine.world.AbstractInterface
import mcpython.server.worldgen.WorldGenerationHandler
import mcpython.util.math
import pyglet
from mcpython import shared
from mcpython.engine import logger
from mcpython.engine.Lifecycle import schedule_task
from mcpython.util.annotation import onlyInClient
from mcpython.util.callbacks import wrap_method
class World(mcpython.engine.world.AbstractInterface.IWorld):
"""
Class holding all data of the world
"""
    def __init__(self, filename: str = None):
        """
        Creates the world instance and registers it globally as shared.world.
        :param filename: the save-folder name to use; defaults to "tmp"
        """
        shared.world = self
        # the block position inside the spawn chunk where players appear
        # todo: add some more variation
        self.spawn_point: typing.Tuple[int, int] = (
            random.randint(0, 15),
            random.randint(0, 15),
        )
        # all dimensions of this world, keyed by their numeric id
        # todo: change for str-based
        self.dimensions: typing.Dict[
            int, mcpython.engine.world.AbstractInterface.IDimension
        ] = {}
        # maps dimension name -> numeric dimension id
        self.dim_to_id: typing.Dict[str, int] = {}
        shared.dimension_handler.init_dims()
        # the id of the dimension the player is currently in
        # todo: change to str; todo: move to player; todo: make property
        self.active_dimension: int = 0
        # container for world-related config; contains: seed [build in] todo: move to config class
        self.config: typing.Dict[str, typing.Any] = {}
        # the gamerule handler for this world; created by reset_config() below
        self.gamerule_handler: typing.Union[
            mcpython.common.world.GameRule.GameRuleHandler, None
        ] = None
        asyncio.get_event_loop().run_until_complete(
            self.reset_config()
        )  # will reset the config
        # todo: move to configs / game rules
        self.hide_faces_to_not_generated_chunks: bool = True
        # the file-name to use, todo: make None if not needed
        self.filename: str = "tmp" if filename is None else filename
        # the save file instance bound to that file name
        self.save_file: mcpython.common.world.SaveFile.SaveFile = (
            mcpython.common.world.SaveFile.SaveFile(self.filename)
        )
        # when in a network, stores a reference to all other players
        self.players: typing.Dict[
            str, mcpython.common.entity.PlayerEntity.PlayerEntity
        ] = {}
        # The name of the local player; None on dedicated servers
        self.local_player: str = "unknown" if shared.IS_CLIENT else None
        self.world_loaded = False  # describes if the world is loaded or not
        # helper giving the off-process world generator access to this world
        self.world_generation_process = mcpython.common.world.OffProcessWorldAccess.OffProcessWorldHelper.spawn_process(
            self
        )
def tick(self):
for dimension in self.dimensions.values():
if dimension.loaded:
dimension.tick()
self.world_generation_process.run_tasks()
    async def add_player(
        self,
        name: str,
        add_inventories: bool = True,
        override: bool = True,
        dimension=0,
    ):
        """
        Will add a new player into the world
        :param name: the name of the player to create
        :param add_inventories: if the inventories should be created
        :param override: if the player should be re-created if it exists in memory
        :param dimension: the dimension to spawn the player in
        :return: the player instance
        :raises ValueError: if name is None
        """
        if name is None:
            raise ValueError("name cannot be None")
        # reuse the cached instance unless the caller asked for a fresh one
        if not override and name in self.players:
            return self.players[name]
        self.players[name] = shared.entity_manager.spawn_entity(
            "minecraft:player",
            (0, 0, 0),
            name,
            dimension=dimension,
        )
        if add_inventories:
            await self.players[name].create_inventories()
        return self.players[name]
@onlyInClient()
def get_active_player(
self, create: bool = True
) -> typing.Union[mcpython.common.entity.PlayerEntity.PlayerEntity, None]:
"""
Returns the player instance for this client
:param create: if the player should be created or not (by calling add_player())
:return: the player instance or None if no player with the name is arrival
"""
if not create and (
self.local_player is None or self.local_player not in self.players
):
return
return (
self.players[self.local_player]
if self.local_player in self.players
else asyncio.get_event_loop().run_until_complete(
self.add_player(self.local_player)
)
)
@onlyInClient()
async def get_active_player_async(
self, create: bool = True
) -> typing.Union[mcpython.common.entity.PlayerEntity.PlayerEntity, None]:
"""
Returns the player instance for this client
:param create: if the player should be created or not (by calling add_player())
:return: the player instance or None if no player with the name is arrival
"""
if not create and (
self.local_player is None or self.local_player not in self.players
):
return
return (
self.players[self.local_player]
if self.local_player in self.players
else await self.add_player(self.local_player)
)
def get_player_by_name(self, name: str):
if name not in self.players:
asyncio.get_event_loop().run_until_complete(self.add_player(name))
return self.players[name]
async def get_player_by_name_async(self, name: str):
if name not in self.players:
await self.add_player(name)
return self.players[name]
def player_iterator(self) -> typing.Iterable:
return list(self.players.values())
def entity_iterator(self) -> typing.Iterable:
for dimension in self.dimensions.values():
yield from dimension.entity_iterator()
    async def reset_config(self):
        """
        Will reset the internal config of the system.
        Calls the event "world:reset_config" in the process, so subscribers
        may add their own config entries.
        todo: change game rule handler reset to a non-new-instance
        """
        # baseline config keys; event subscribers below may add more
        self.config = {"enable_auto_gen": False, "enable_world_barrier": False}
        await shared.event_handler.call_async("world:reset_config")
        # a fresh handler drops any previously modified game rule values
        self.gamerule_handler = mcpython.common.world.GameRule.GameRuleHandler(self)
@onlyInClient()
def get_active_dimension(
self,
) -> typing.Union[mcpython.engine.world.AbstractInterface.IDimension, None]:
"""
Will return the dimension the current player is in
:return: the dimension or None if no dimension is set
"""
return self.get_dimension(self.active_dimension)
    def get_dimension_names(self) -> typing.Iterable[str]:
        # Returns a live view of all registered dimension names (the keys of
        # the name -> numeric id lookup table).
        return self.dim_to_id.keys()
def get_dimension_by_name(
self, name: str
) -> mcpython.engine.world.AbstractInterface.IDimension:
if isinstance(name, mcpython.engine.world.AbstractInterface.IDimension):
logger.print_stack(
"invoked get_dimension_by_name() with dimension instance as name; this seems not right!"
)
return name
return self.dimensions[self.dim_to_id[name]]
    def add_dimension(
        self, dim_id: int, name: str, dim_config=None
    ) -> mcpython.engine.world.AbstractInterface.IDimension:
        """
        Will add a new dimension into the system
        :param dim_id: the numeric id to create the dimension under
        :param name: the name of the dimension
        :param dim_config: the config dict handed to the world generator
        :return: the dimension instance
        """
        if dim_config is None:
            dim_config = {}
        dim = self.dimensions[dim_id] = mcpython.common.world.Dimension.Dimension(
            self, dim_id, name, gen_config=dim_config
        )
        # keep the name -> id lookup table in sync
        self.dim_to_id[dim.name] = dim_id
        shared.world_generation_handler.setup_dimension(dim, dim_config)
        return dim
    @deprecation.deprecated()
    def join_dimension(self, dim_id: int):
        """
        Blocking wrapper around join_dimension_async(), kept for backwards
        compatibility; prefer the async variant.
        :param dim_id: the dimension id to change the active player to
        """
        return asyncio.get_event_loop().run_until_complete(
            self.join_dimension_async(dim_id)
        )
    async def join_dimension_async(self, dim_id: int):
        """
        Will change the dimension of the active player
        :param dim_id: the dimension to change to todo: make str
        todo: move to player
        todo: event calls must be async-ed
        """
        logger.println("changing dimension to '{}'...".format(dim_id))
        await shared.event_handler.call_async(
            "dimension:change:pre", self.active_dimension, dim_id
        )
        # the chunk position the player currently occupies: the chunks around
        # it are unloaded in the old dimension and re-loaded in the new one
        sector = mcpython.util.math.position_to_chunk(
            (await shared.world.get_active_player_async()).position
        )
        logger.println("unloading chunks...")
        await self.change_chunks_async(sector, None)
        old = self.active_dimension
        self.active_dimension = dim_id
        logger.println("loading new chunks...")
        await self.change_chunks_async(None, sector)
        await shared.event_handler.call_async("dimension:change:post", old, dim_id)
        logger.println("finished!")
def get_dimension(
self, dim_id: typing.Union[int, str]
) -> mcpython.engine.world.AbstractInterface.IDimension:
"""
will get an dimension with an special id
:param dim_id: the id to use
:return: the dimension instance or None if it does not exist
"""
if dim_id in self.dimensions:
return self.dimensions[dim_id]
if dim_id in self.dim_to_id:
return self.dimensions[self.dim_to_id[dim_id]]
# logger.print_stack("[ERROR] failed to access dim '{}', below call stack".format(dim_id))
    def hit_test(
        self,
        position: typing.Tuple[float, float, float],
        vector: typing.Tuple[float, float, float],
        max_distance: int = 8,
    ) -> typing.Union[
        typing.Tuple[
            typing.Tuple[int, int, int],
            typing.Tuple[int, int, int],
            typing.Tuple[float, float, float],
        ],
        typing.Tuple[None, None, None],
    ]:
        """
        Line of sight search from current position.
        If a block is intersected it is returned, along with the block previously in the line of sight.
        If no block is found, return None, None, None
        Will check for bounding boxes of blocks (get_view_bbox())
        :param position: The (x, y, z) position to check visibility from
        :param vector: The line of sight vector, as (dx, dy, dz)
        :param max_distance: How many blocks away at max to search for a hit, will stop the ray after
            the amount of blocks
        todo: cache the bbox of the block
        todo: move to dimension
        todo: add variant only taking the player
        todo: cache when possible
        todo: add variant for entities
        """
        # get m from the gamerule
        # m is the number of ray-march steps per block; higher values trade
        # speed for hit accuracy
        m = shared.world.gamerule_handler.table["hitTestSteps"].status.status
        x, y, z = position
        dx, dy, dz = vector
        # scale the direction so each iteration advances 1/m of a block
        dx /= m
        dy /= m
        dz /= m
        previous = None
        for _ in range(max_distance * m):
            # the integer block coordinate the ray currently passes through
            key = mcpython.util.math.normalize((x, y, z))
            block = self.get_active_dimension().get_block(key)
            # str-typed entries are placeholders, not real block instances,
            # so they cannot be hit
            if (
                block
                and type(block) != str
                and block.get_view_bbox().test_point_hit((x, y, z), block.position)
            ):
                return key, previous, (x, y, z)
            if key != previous:
                previous = key
            x += dx
            y += dy
            z += dz
        return None, None, None
def show_chunk(
self,
chunk: typing.Union[
typing.Tuple[int, int], mcpython.engine.world.AbstractInterface.IChunk
],
):
"""
Ensure all blocks in the given chunk that should be shown are
drawn to the canvas.
:param chunk: the chunk to show
"""
if not issubclass(type(chunk), mcpython.engine.world.AbstractInterface.IChunk):
chunk = self.get_active_dimension().get_chunk(*chunk, generate=False)
if chunk is None:
return
chunk.show()
def hide_chunk(
self,
chunk: typing.Union[
typing.Tuple[int, int], mcpython.engine.world.AbstractInterface.IChunk
],
):
"""
Ensure all blocks in the given chunk that should be hidden are
removed from the canvas.
:param chunk: the chunk to hide
"""
if not issubclass(type(chunk), mcpython.engine.world.AbstractInterface.IChunk):
chunk = self.get_active_dimension().get_chunk(*chunk, generate=False)
if chunk is None:
return
chunk.hide()
@deprecation.deprecated()
def change_chunks(
self,
before: typing.Union[typing.Tuple[int, int], None],
after: typing.Union[typing.Tuple[int, int], None],
generate_chunks=True,
load_immediate=True,
dimension=None,
):
"""
Move from chunk `before` to chunk `after`
:param before: the chunk before
:param after: the chunk after
:param generate_chunks: if chunks should be generated
:param | |
## @ingroup Analyses-Aerodynamics
# Vortex_Lattice.py
#
# Created: Nov 2013, <NAME>
# Modified: 2014, <NAME>, <NAME>, <NAME>
# Feb 2016, <NAME>
# Apr 2017, <NAME>
# Nov 2017, <NAME>
# Dec 2018, <NAME>
# Apr 2020, <NAME>
# Jun 2020, <NAME>
# Sep 2020, <NAME>
# May 2021, <NAME>
# Jun 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# SUAVE imports
import SUAVE
from SUAVE.Core import Data
from SUAVE.Core import Units
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.VLM import VLM
# local imports
from .Aerodynamics import Aerodynamics
from SUAVE.Methods.Aerodynamics.Supersonic_Zero.Drag.Cubic_Spline_Blender import Cubic_Spline_Blender
# package imports
import numpy as np
from scipy.interpolate import interp2d, RectBivariateSpline, RegularGridInterpolator
# ----------------------------------------------------------------------
# Class
# ----------------------------------------------------------------------
## @ingroup Analyses-Aerodynamics
class Vortex_Lattice(Aerodynamics):
"""This builds a surrogate and computes lift using a basic vortex lattice.
Assumptions:
None
Source:
None
"""
def __defaults__(self):
"""This sets the default values and methods for the analysis.
Assumptions:
None
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
self.tag = 'Vortex_Lattice'
self.geometry = Data()
self.settings = Data()
self.settings.number_spanwise_vortices = 15
self.settings.number_chordwise_vortices = 5
self.settings.wing_spanwise_vortices = None
self.settings.wing_chordwise_vortices = None
self.settings.fuselage_spanwise_vortices = None
self.settings.fuselage_chordwise_vortices = None
self.settings.spanwise_cosine_spacing = True
self.settings.vortex_distribution = Data()
self.settings.model_fuselage = False
self.settings.model_nacelle = False
self.settings.leading_edge_suction_multiplier = 1.0
self.settings.initial_timestep_offset = 0
self.settings.wake_development_time = 0.05
self.settings.number_of_wake_timesteps = 30
self.settings.propeller_wake_model = False
self.settings.use_bemt_wake_model = False
self.settings.discretize_control_surfaces = False
self.settings.use_VORLAX_matrix_calculation = False
self.settings.floating_point_precision = np.float32
self.settings.use_surrogate = True
# conditions table, used for surrogate model training
self.training = Data()
self.training.angle_of_attack = np.array([[-5., -2. , 0.0 , 2.0, 5.0, 8.0, 10.0 , 12., 45., 75.]]).T * Units.deg
self.training.Mach = np.array([[0.0, 0.1 , 0.2 , 0.3, 0.5, 0.75 , 0.85 , 0.9,\
1.3, 1.35 , 1.5 , 2.0, 2.25 , 2.5 , 3.0 , 3.5]]).T
self.training.lift_coefficient_sub = None
self.training.lift_coefficient_sup = None
self.training.wing_lift_coefficient_sub = None
self.training.wing_lift_coefficient_sup = None
self.training.drag_coefficient_sub = None
self.training.drag_coefficient_sup = None
self.training.wing_drag_coefficient_sub = None
self.training.wing_drag_coefficient_sup = None
# blending function
self.hsub_min = 0.85
self.hsub_max = 0.95
self.hsup_min = 1.05
self.hsup_max = 1.25
# surrogoate models
self.surrogates = Data()
self.surrogates.lift_coefficient_sub = None
self.surrogates.lift_coefficient_sup = None
self.surrogates.lift_coefficient_trans = None
self.surrogates.wing_lift_coefficient_sub = None
self.surrogates.wing_lift_coefficient_sup = None
self.surrogates.wing_lift_coefficient_trans = None
self.surrogates.drag_coefficient_sub = None
self.surrogates.drag_coefficient_sup = None
self.surrogates.drag_coefficient_trans = None
self.surrogates.wing_drag_coefficient_sub = None
self.surrogates.wing_drag_coefficient_sup = None
self.surrogates.wing_drag_coefficient_trans = None
self.evaluate = None
def initialize(self,use_surrogate,n_sw,n_cw,propeller_wake_model, use_bemt_wake_model,ito,wdt,nwts,mf,mn):
"""Drives functions to get training samples and build a surrogate.
Assumptions:
None
Source:
N/A
Inputs:
use_surrogate [bool]
n_sw number of spanwise vortices [int]
n_cw number of chordwise vortices [int]
propeller_wake_model [bool]
ito initial timestep offset [s]
wdt wake development time [s]
nwts number of wake timesteps [int]
Outputs:
None
Properties Used:
None
"""
# Unpack:
settings = self.settings
if n_sw is not None:
settings.number_spanwise_vortices = n_sw
if n_cw is not None:
settings.number_chordwise_vortices = n_cw
settings.use_surrogate = use_surrogate
settings.propeller_wake_model = propeller_wake_model
settings.use_bemt_wake_model = use_bemt_wake_model
settings.initial_timestep_offset = ito
settings.wake_development_time = wdt
settings.number_of_wake_timesteps = nwts
settings.model_fuselage = mf
settings.model_nacelle = mn
# If we are using the surrogate
if use_surrogate == True:
# sample training data
self.sample_training()
# build surrogate
self.build_surrogate()
self.evaluate = self.evaluate_surrogate
else:
self.evaluate = self.evaluate_no_surrogate
def evaluate_surrogate(self,state,settings,geometry):
"""Evaluates lift and drag using available surrogates.
Assumptions:
None
Source:
N/A
Inputs:
state.conditions.
freestream.dynamics_pressure [-]
angle_of_attack [radians]
Outputs:
conditions.aerodynamics.lift_breakdown.
inviscid_wings[wings.*.tag] [-] CL (wing specific)
inviscid_wings_lift.total [-] CL
compressible_wing [-] CL (wing specific)
conditions.aerodynamics.lift_coefficient [-] CL
conditions.aerodynamics.drag_breakdown.induced.
total [-] CDi
inviscid [-] CDi
wings_sectional_drag [-] CDiy (wing specific)
inviscid_wings [-] CDi (wing specific)
Properties Used:
self.surrogates.
lift_coefficient [-] CL
wing_lift_coefficient[wings.*.tag] [-] CL (wing specific)
"""
# unpack
conditions = state.conditions
settings = self.settings
geometry = self.geometry
surrogates = self.surrogates
hsub_min = self.hsub_min
hsub_max = self.hsub_max
hsup_min = self.hsup_min
hsup_max = self.hsup_max
AoA = conditions.aerodynamics.angle_of_attack.T[0]
Mach = conditions.freestream.mach_number.T[0]
# Unapck the surrogates
CL_surrogate_sub = surrogates.lift_coefficient_sub
CL_surrogate_sup = surrogates.lift_coefficient_sup
CL_surrogate_trans = surrogates.lift_coefficient_trans
CDi_surrogate_sub = surrogates.drag_coefficient_sub
CDi_surrogate_sup = surrogates.drag_coefficient_sup
CDi_surrogate_trans = surrogates.drag_coefficient_trans
wing_CL_surrogates_sub = surrogates.wing_lift_coefficient_sub
wing_CL_surrogates_sup = surrogates.wing_lift_coefficient_sup
wing_CL_surrogates_trans = surrogates.wing_lift_coefficient_trans
wing_CDi_surrogates_sub = surrogates.wing_drag_coefficient_sub
wing_CDi_surrogates_sup = surrogates.wing_drag_coefficient_sup
wing_CDi_surrogates_trans = surrogates.wing_drag_coefficient_trans
# Create Result Data Structures
data_len = len(AoA)
inviscid_lift = np.zeros([data_len,1])
inviscid_drag = np.zeros([data_len,1])
conditions.aerodynamics.drag_breakdown.induced = Data()
conditions.aerodynamics.drag_breakdown.induced.inviscid_wings = Data()
conditions.aerodynamics.lift_breakdown = Data()
conditions.aerodynamics.lift_breakdown.inviscid_wings = Data()
conditions.aerodynamics.lift_breakdown.compressible_wings = Data()
conditions.aerodynamics.drag_breakdown.compressible = Data()
# 3 Cases for surrogates, subsonic only, supersonic only, and both
if CL_surrogate_sup == None:
inviscid_lift = CL_surrogate_sub(AoA,Mach,grid=False)
inviscid_drag = CDi_surrogate_sub(AoA,Mach,grid=False)
elif CL_surrogate_sub == None:
inviscid_lift = CL_surrogate_sup(AoA,Mach,grid=False)
inviscid_drag = CDi_surrogate_sup(AoA,Mach,grid=False)
else:
# Spline for Subsonic-to-Transonic-to-Supersonic Regimes
sub_trans_spline = Cubic_Spline_Blender(hsub_min,hsub_max)
h_sub = lambda M:sub_trans_spline.compute(M)
sup_trans_spline = Cubic_Spline_Blender(hsup_min,hsup_max)
h_sup = lambda M:sup_trans_spline.compute(M)
inviscid_lift = h_sub(Mach)*CL_surrogate_sub(AoA,Mach,grid=False) +\
(h_sup(Mach) - h_sub(Mach))*CL_surrogate_trans((AoA,Mach))+ \
(1- h_sup(Mach))*CL_surrogate_sup(AoA,Mach,grid=False)
inviscid_drag = h_sub(Mach)*CDi_surrogate_sub(AoA,Mach,grid=False) +\
(h_sup(Mach) - h_sub(Mach))*CDi_surrogate_trans((AoA,Mach))+ \
(1- h_sup(Mach))*CDi_surrogate_sup(AoA,Mach,grid=False)
# Pack
conditions.aerodynamics.lift_coefficient = np.atleast_2d(inviscid_lift).T
conditions.aerodynamics.lift_breakdown.total = np.atleast_2d(inviscid_lift).T
conditions.aerodynamics.drag_breakdown.induced.inviscid = np.atleast_2d(inviscid_drag).T
for wing in geometry.wings.keys():
if CL_surrogate_sup == None:
inviscid_wing_lifts = wing_CL_surrogates_sub[wing](AoA,Mach,grid=False)
inviscid_wing_drags = wing_CDi_surrogates_sub[wing](AoA,Mach,grid=False)
elif CL_surrogate_sub == None:
inviscid_wing_lifts = wing_CL_surrogates_sup[wing](AoA,Mach,grid=False)
inviscid_wing_drags = wing_CDi_surrogates_sup[wing](AoA,Mach,grid=False)
else:
inviscid_wing_lifts = h_sub(Mach)*wing_CL_surrogates_sub[wing](AoA,Mach,grid=False) + \
(h_sup(Mach) - h_sub(Mach))*wing_CL_surrogates_trans[wing]((AoA,Mach))+ \
(1- h_sup(Mach))*wing_CL_surrogates_sup[wing](AoA,Mach,grid=False)
inviscid_wing_drags = h_sub(Mach)*wing_CDi_surrogates_sub[wing](AoA,Mach,grid=False) + \
(h_sup(Mach) - h_sub(Mach))*wing_CDi_surrogates_trans[wing]((AoA,Mach))+ \
(1- h_sup(Mach))*wing_CDi_surrogates_sup[wing](AoA,Mach,grid=False)
# Pack
conditions.aerodynamics.lift_breakdown.inviscid_wings[wing] = np.atleast_2d(inviscid_wing_lifts).T
conditions.aerodynamics.lift_breakdown.compressible_wings[wing] = np.atleast_2d(inviscid_wing_lifts).T
conditions.aerodynamics.drag_breakdown.induced.inviscid_wings[wing] = np.atleast_2d(inviscid_wing_drags).T
return
    def evaluate_no_surrogate(self,state,settings,geometry):
        """Evaluates lift and drag directly using VLM

        Assumptions:
        no changes to initial geometry or settings

        Source:
        N/A

        Inputs:
        state.conditions.
          angle_of_attack                         [radians]

        Outputs:
        conditions.aerodynamics.lift_breakdown.
          inviscid_wings_lift[wings.*.tag]        [-] CL (wing specific)
          inviscid_wings_lift.total               [-] CL
          inviscid_wings_sectional                [-] Cly
          compressible_wing                       [-] CL (wing specific)
        conditions.aerodynamics.drag_breakdown.induced.
          total                                   [-] CDi
          inviscid                                [-] CDi
          wings_sectional_drag                    [-] CDiy (wing specific)
          induced.inviscid_wings                  [-] CDi  (wing specific)
        conditions.aerodynamics.
          pressure_coefficient                    [-] CP

        Properties Used:
        self.settings
        self.geometry
        """
        # unpack; the passed-in settings/geometry are intentionally shadowed
        # by the analysis' own copies
        conditions = state.conditions
        settings = self.settings
        geometry = self.geometry

        # Evaluate the VLM
        # if in transonic regime, use surrogate
        inviscid_lift, inviscid_drag, wing_lifts, wing_drags, wing_lift_distribution, \
        wing_drag_distribution, induced_angle_distribution, pressure_coefficient, CYMTOT,CRMTOT,CM = \
        calculate_VLM(conditions,settings,geometry)

        # Lift
        conditions.aerodynamics.lift_coefficient = inviscid_lift
        conditions.aerodynamics.lift_breakdown.total = inviscid_lift
        conditions.aerodynamics.lift_breakdown.compressible_wings = wing_lifts
        conditions.aerodynamics.lift_breakdown.inviscid_wings = wing_lifts
        conditions.aerodynamics.lift_breakdown.inviscid_wings_sectional = wing_lift_distribution

        # Drag
        conditions.aerodynamics.drag_breakdown.induced = Data()
        conditions.aerodynamics.drag_breakdown.induced.total = inviscid_drag
        conditions.aerodynamics.drag_breakdown.induced.inviscid = inviscid_drag
        conditions.aerodynamics.drag_breakdown.induced.inviscid_wings = wing_drags
        conditions.aerodynamics.drag_breakdown.induced.wings_sectional = wing_drag_distribution
        conditions.aerodynamics.drag_breakdown.induced.angle = induced_angle_distribution

        # Pressure and moment coefficients
        conditions.aerodynamics.pressure_coefficient = pressure_coefficient
        conditions.aerodynamics.moment_coefficient = CM

        # Stability
        conditions.stability.static.yawing_moment_coefficient = CYMTOT
        conditions.stability.static.rolling_moment_coefficient = CRMTOT

        return
def sample_training(self):
"""Call methods to run vortex lattice for sample point evaluation.
Assumptions:
None
Source:
N/A
Inputs:
see properties used
Outputs:
self.training.
lift_coefficient [-]
wing_lift_coefficient [-] (wing specific)
drag_coefficient [-]
wing_drag_coefficient [-] (wing specific)
Properties Used:
self.geometry.wings.*.tag
self.settings (passed to calculate vortex lattice)
self.training.angle_of_attack [radians]
"""
# unpack
geometry = self.geometry
settings = self.settings
training = self.training
AoA = training.angle_of_attack
Mach = training.Mach
lenAoA = len(AoA)
sub_len = int(sum(Mach<1.))
sup_len = len(Mach)-sub_len
# Assign placeholders
CL_sub = np.zeros((lenAoA,sub_len))
CL_sup = np.zeros((lenAoA,sup_len))
CDi_sub = np.zeros((lenAoA,sub_len))
CDi_sup = np.zeros((lenAoA,sup_len))
CL_w_sub = Data()
CL_w_sup = Data()
CDi_w_sub = Data()
CDi_w_sup = Data()
# Setup new array shapes for vectorization
lenM = len(Mach)
AoAs = np.atleast_2d(np.tile(AoA,lenM).T.flatten()).T
Machs = np.atleast_2d(np.tile(Mach,lenAoA).flatten()).T
zeros = np.zeros_like(Machs)
# Setup Konditions
konditions = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics()
konditions.aerodynamics.angle_of_attack = AoAs
konditions.freestream.mach_number = Machs
konditions.freestream.velocity = zeros
total_lift, total_drag, wing_lifts, wing_drags, _, _, _, _, _, _, _ = calculate_VLM(konditions,settings,geometry)
# Split subsonic from supersonic
if np.sum(Machs<1.)==0:
sub_sup_split = 0
else:
sub_sup_split = np.where(Machs < 1.0)[0][-1] + 1
len_sub_mach = np.sum(Mach<1.)
len_sup_mach = lenM - len_sub_mach
# Divide up the data to get ready to store
CL_sub = total_lift[0:sub_sup_split,0]
CL_sup = total_lift[sub_sup_split:,0]
CDi_sub = total_drag[0:sub_sup_split,0]
CDi_sup = total_drag[sub_sup_split:,0]
# A little reshape to get into the right order
CL_sub = np.reshape(CL_sub,(len_sub_mach,lenAoA)).T
CL_sup = np.reshape(CL_sup,(len_sup_mach,lenAoA)).T
CDi_sub = np.reshape(CDi_sub,(len_sub_mach,lenAoA)).T
CDi_sup = np.reshape(CDi_sup,(len_sup_mach,lenAoA)).T
# Now do the same for each wing
for | |
self._termlist = terms
self._evaluators = evaluators
self._term_to_column_builders = term_to_column_builders
term_column_count = []
self._column_names = []
for term in self._termlist:
column_builders = self._term_to_column_builders[term]
this_count = 0
for column_builder in column_builders:
this_names = column_builder.column_names()
this_count += len(this_names)
self._column_names += this_names
term_column_count.append(this_count)
term_column_starts = np.concatenate(([0], np.cumsum(term_column_count)))
self._term_slices = []
for i, term in enumerate(self._termlist):
span = slice(term_column_starts[i], term_column_starts[i + 1])
self._term_slices.append((term, span))
self.total_columns = np.sum(term_column_count, dtype=int)
# Generate this on demand, to avoid a reference loop:
@property
def design_info(self):
"""A :class:`DesignInfo` object giving information about the design
matrices that this DesignMatrixBuilder can be used to create."""
return DesignInfo(self._column_names, self._term_slices,
builder=self)
def subset(self, which_terms):
    """Create a new :class:`DesignMatrixBuilder` that includes only a
    subset of the terms that this object does.

    For example, if `builder` has terms `x`, `y`, and `z`, then::

        builder2 = builder.subset(["x", "z"])

    will return a new builder that will return design matrices with only
    the columns corresponding to the terms `x` and `z`. After we do this,
    then in general these two expressions will return the same thing (here
    we assume that `x`, `y`, and `z` each generate a single column of the
    output)::

        build_design_matrix([builder], data)[0][:, [0, 2]]
        build_design_matrix([builder2], data)[0]

    However, a critical difference is that in the second case, `data` need
    not contain any values for `y`. This is very useful when doing
    prediction using a subset of a model, in which situation R usually
    forces you to specify dummy values for `y`.

    If using a formula to specify the terms to include, remember that like
    any formula, the intercept term will be included by default, so use
    `0` or `-1` in your formula if you want to avoid this.

    :arg which_terms: The terms which should be kept in the new
      :class:`DesignMatrixBuilder`. If this is a string, then it is parsed
      as a formula, and then the names of the resulting terms are taken as
      the terms to keep. If it is a list, then it can contain a mixture of
      term names (as strings) and :class:`Term` objects.

    :raises PatsyError: if a formula with a left-hand side is given, or if
      a requested term is not part of this builder.

    .. versionadded: 0.2.0
    """
    # Invert the evaluator list so each factor can be looked up directly.
    factor_to_evaluators = {}
    for evaluator in self._evaluators:
        factor_to_evaluators[evaluator.factor] = evaluator
    design_info = self.design_info
    term_name_to_term = dict(zip(design_info.term_names,
                                 design_info.terms))
    if isinstance(which_terms, str):
        # We don't use this EvalEnvironment -- all we want to do is to
        # find matching terms, and we can't do that use == on Term
        # objects, because that calls == on factor objects, which in turn
        # compares EvalEnvironments. So all we do with the parsed formula
        # is pull out the term *names*, which the EvalEnvironment doesn't
        # effect. This is just a placeholder then to allow the ModelDesc
        # to be created:
        env = EvalEnvironment({})
        desc = ModelDesc.from_formula(which_terms, env)
        if desc.lhs_termlist:
            raise PatsyError("right-hand-side-only formula required")
        # Normalize the formula case to the list-of-names case.
        which_terms = [term.name() for term in desc.rhs_termlist]
    terms = []
    evaluators = set()
    term_to_column_builders = {}
    for term_or_name in which_terms:
        # Each entry may be a term name (string) or an actual Term object.
        if isinstance(term_or_name, six.string_types):
            if term_or_name not in term_name_to_term:
                raise PatsyError("requested term %r not found in "
                                 "this DesignMatrixBuilder"
                                 % (term_or_name,))
            term = term_name_to_term[term_or_name]
        else:
            term = term_or_name
        if term not in self._termlist:
            raise PatsyError("requested term '%s' not found in this "
                             "DesignMatrixBuilder" % (term,))
        # Carry over the evaluators for every factor the term uses, plus
        # the term's existing column builders, so no refitting is needed.
        for factor in term.factors:
            evaluators.add(factor_to_evaluators[factor])
        terms.append(term)
        column_builder = self._term_to_column_builders[term]
        term_to_column_builders[term] = column_builder
    return DesignMatrixBuilder(terms,
                               evaluators,
                               term_to_column_builders)
def _build(self, evaluator_to_values, dtype):
    """Assemble one design matrix from already-evaluated factor values.

    Returns a ``(need_reshape, matrix)`` pair; ``need_reshape`` is True
    when no factor depended on the data (e.g. an intercept-only model),
    in which case a single placeholder row is produced and the caller is
    expected to broadcast it to the real number of rows.
    """
    values_by_factor = {}
    n_rows = None
    for evaluator, value in evaluator_to_values.items():
        # Ignore evaluators that belong to other builders.
        if evaluator not in self._evaluators:
            continue
        values_by_factor[evaluator.factor] = value
        if n_rows is None:
            n_rows = value.shape[0]
        else:
            assert n_rows == value.shape[0]
    need_reshape = n_rows is None
    if need_reshape:
        # We have no dependence on the data -- e.g. an empty termlist, or
        # only an intercept term.
        n_rows = 1
    matrix = DesignMatrix(np.empty((n_rows, self.total_columns),
                                   dtype=dtype),
                          self.design_info)
    # Fill the matrix term by term, each column builder writing into its
    # own contiguous slice of columns.
    col = 0
    for term in self._termlist:
        for column_builder in self._term_to_column_builders[term]:
            nxt = col + column_builder.total_columns
            column_builder.build(values_by_factor, matrix[:, col:nxt])
            col = nxt
    assert col == self.total_columns
    return need_reshape, matrix
class _CheckMatch(object):
    """Accumulates a single value observed in several places (e.g. a row
    count or a pandas index) and raises :class:`PatsyError` as soon as two
    observations disagree according to ``eq_fn``."""

    def __init__(self, name, eq_fn):
        self._name = name
        self._eq_fn = eq_fn
        # First observed value; stays None until check() is called.
        self.value = None
        self._value_desc = None
        self._value_origin = None

    def check(self, seen_value, desc, origin):
        """Record ``seen_value`` (described by ``desc``/``origin``), or raise
        if it conflicts with a previously recorded value."""
        if self.value is None:
            # First sighting: remember the value and where it came from.
            self.value = seen_value
            self._value_desc = desc
            self._value_origin = origin
            return
        if self._eq_fn(self.value, seen_value):
            return
        msg = ("%s mismatch between %s and %s"
               % (self._name, self._value_desc, desc))
        if isinstance(self.value, int):
            msg += " (%r versus %r)" % (self.value, seen_value)
        # XX FIXME: this is a case where having discontiguous Origins
        # would be useful...
        raise PatsyError(msg, origin)
def build_design_matrices(builders, data,
NA_action="drop",
return_type="matrix",
dtype=np.dtype(float)):
"""Construct several design matrices from :class:`DesignMatrixBuilder`
objects.
This is one of Patsy's fundamental functions. This function and
:func:`design_matrix_builders` together form the API to the core formula
interpretation machinery.
:arg builders: A list of :class:`DesignMatrixBuilders` specifying the
design matrices to be built.
:arg data: A dict-like object which will be used to look up data.
:arg NA_action: What to do with rows that contain missing values. You can
``"drop"`` them, ``"raise"`` an error, or for customization, pass an
:class:`NAAction` object. See :class:`NAAction` for details on what
values count as 'missing' (and how to alter this).
:arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
:arg dtype: The dtype of the returned matrix. Useful if you want to use
single-precision or extended-precision.
This function returns either a list of :class:`DesignMatrix` objects (for
``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
(for ``return_type="dataframe"``). In both cases, all returned design
matrices will have ``.design_info`` attributes containing the appropriate
:class:`DesignInfo` objects.
Note that unlike :func:`design_matrix_builders`, this function takes only
a simple data argument, not any kind of iterator. That's because this
function doesn't need a global view of the data -- everything that depends
on the whole data set is already encapsulated in the `builders`. If you
are incrementally processing a large data set, simply call this function
for each chunk.
Index handling: This function always checks for indexes in the following
places:
* If ``data`` is a :class:`pandas.DataFrame`, its ``.index`` attribute.
* If any factors evaluate to a :class:`pandas.Series` or
:class:`pandas.DataFrame`, then their ``.index`` attributes.
If multiple indexes are found, they must be identical (same values in the
same order). If no indexes are found, then a default index is generated
using ``np.arange(num_rows)``. One way or another, we end up with a single
index for all the data. If ``return_type="dataframe"``, then this index is
used as the index of the returned DataFrame objects. Examining this index
makes it possible to determine which rows were removed due to NAs.
Determining the number of rows in design matrices: This is not as obvious
as it might seem, because it's possible to have a formula like "~ 1" that
doesn't depend on the data (it has no factors). For this formula, it's
obvious what every row in the design matrix should look like (just the
value ``1``); but, how many rows like this should there be? To determine
the number of rows in a design matrix, this function always checks in the
following places:
* If ``data`` is a :class:`pandas.DataFrame`, then its number of rows.
* The number of entries in any factors present in any of the design
* matrices being built.
All these values much match. In particular, if this function is called to
generate multiple design matrices at once, then they must all have the
same number of rows.
.. versionadded:: 0.2.0
The ``NA_action`` argument.
"""
if isinstance(NA_action, str):
NA_action = NAAction(NA_action)
if return_type == "dataframe" and not have_pandas:
raise PatsyError("pandas.DataFrame was requested, but pandas "
"is not installed")
if return_type not in ("matrix", "dataframe"):
raise PatsyError("unrecognized output type %r, should be "
"'matrix' or 'dataframe'" % (return_type,))
# Evaluate factors
evaluator_to_values = {}
evaluator_to_isNAs = {}
import operator
rows_checker = _CheckMatch("Number of rows", lambda a, b: a == b)
index_checker = _CheckMatch("Index", lambda a, b: a.equals(b))
if have_pandas and isinstance(data, pandas.DataFrame):
index_checker.check(data.index, "data.index", None)
rows_checker.check(data.shape[0], "data argument", None)
for builder in builders:
# We look at evaluators rather | |
<filename>django/docs/howto/error-reporting.txt.py
XXXXXXXXXXXXXXX
XXXXX XXXXXXXXX
XXXXXXXXXXXXXXX
XXXX XXXXXX XXXXXXX X XXXXXX XXXX XXX XXXXXX XXXXXX XXXX XXX XXX
XXXXXXXXXXXXXXXX XXXXXXXX XXXX XXXX XXXX XXXX XXXXXX XXX XXXX XXXXXXX XXX XXXX
XXXX XXXXXXX XXXXXXXXX XXXXX XXXX XXXXXX XXXXXXX XX XXXX XXXXXXXXXXX XXXX XXX XX
XXXXXXXX XX XXX XXXXX XXXXXX
XXXXXXXX XXXXXXX XXXX XXXXXXXXXXXXXXXX XXX XX XXXXXXXXX XXXXX XXXXXX XXXXX XXX
XXXXXX XXXXXXXXX XX XXXX XXXX XX XXXXXXXX XXXX XXXXXXX XXX XXXX XXXXXX XXXXX
XXXXXX XXX XXXX XX XXXX XXXXX XX XXXXXX XXXX XXXXX XX XXXXXXXX XXXXXX XX XXXXXX
XXX XX XXXXXXXXXX XX XXXXXX XXXXXXX XXXX XXXXXXX XXXXX XXXXX XXXXXXX
XXXXX XXXXXXX
XXXXXXXXXXXXX
XXXXXX XXXXXX
XXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX XX XXXXXXXXXX XXXXXX XXXX XXXXX XXX XXXXX XXXXXX XX XXX
XXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXX XXXX XXXX XXXXXX XX XXXXXXXXX XXXXXXXXX XXX
XXXXXXX XX XX XXXXXXXX XXXXXX XXXXX XXXXXXXXX XXXXXXXXX XXX XXX XXXXXXXX XXXX
XX XXXX XXXXXX XXXX XX XXX XX XXXXXXXXX XXXX XXXXX XXX XXXXXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXX XX XXX XXXXXXX XXX XXXXXXXXXXXXXXXXX XXXX XXX X XXXXXXXXXXX XX XXX
XXXXXX X XXXXXXXX XXXXXX XXXXXXXXXX XXX XXXXXXX XXXXX XXX XXXX XXXXXXX XXXX
XXXXXX XXX XXXXXX
XX XXXXXX
XX XXXXX XX XXXX XXXXXX XXXXXX XXXXXXXX X XXX XXXXXXXX XXXXXXX XX
XXX XX XXXXXXX XX XXXX XXXX XXXXXXX XX XXX XXXX XXXXXX XXXXXX XXXX
XX XXXXXXX XXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXX XXXXXXXX XXX XX XXXX XXXXXXXX XXXXXXXXX XX XXXX XXXX
XXXXXXXX XXXXXXXXXXXXXX XXXXXXX XXXXXXXXX XXXXXX XXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXX X XXXX XXXX XX XXXXXXXXXXXXX
XXXXXXXXX
XX XXXXXXXX XXXXXX XXXX XXXX XXXXX XXXX XXXXXXXXXXXXXXX XXXXXXXX XXXX XXXX
XXXXXXXXX XXXXXX XXX XXXXX XXXX XXXX XXXXXXXX XX XXX X XXXXXXXXX XXXXXX
XXXXXXXX XXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX
XX XXXXXXXX XXXX XXXXXXXXX XXX XXX XXXXX XXXXXXXXX XX XXX XXXXXXXXXX XX XXX
XXXXXXXXXXXXXXXXX XXXXXXXX
XX XXXXXXXXX
XXXXXX XXXXX XXXXXX XXX XXXX XXXXX XXX XXXXXXX XXXXXXXXXX XX XXX XXX
XXXXXXXXX XXXX XXXXXXXX XX XXXXXXXXXXXXXXXXX XXXX XXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXX XXXXXX
XXXXXXXXXX
XXXXXX XXX XXXX XX XXXXXXXXXX XX XXXXX XXXXXX XXXXX XXXXXX XXXXX XXXX XXXXX
XXX XXXXXX XXXXXXXX XXXXXX XXXXX XXXXXX XXXXX XXX XXXXXX XXXXX
X XXXXXXXXXXXXXXXX XX XXXXXXXXXX
X XXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XXXXXXXXXX XXX XXXX XXXXXX XXXX XXXXX XXX XXXXX XXXXXX XX XXX
XXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXX XXXX XXXX XXXXXX X XXX XXX XXX XXXXXXX XXX
X XXXXXXXX XX XXXXXXX XXXXXX XX XXXXX XXX XXXX XXXX XXXXX XXXX X XXXXXXX XX
XXXXX XXX XXXXXXX XXXXXX XXXXXX XX XXXXXX XXXX XX XXXXXX XXX XXXXX XX XXXX
XXXXXXX XXXX XXXX XXX XXXXXXX XX XXXXX XX XXX XXXXXXXXX XXXX XXXXX XXXX
XXXXXXXX XX XXXX XXXXXX XXX XXXX XXXX
XX XXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXX
XXXXXX XXXXX XXXXXXXXXX XXXX XXXXXXXXXX XXX XXXXXXX XXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XX XXXXXXX XXX XXX XX XXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXX
XXX XXX XXXX XXXXXX XX XXXX XXXXXXXXX XXXXXXXXXX XXXX XX XXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XX XXXXXX XX X XXXX XX XXXXXXXX
XXXXXXX XXXXXXXXXX XXXXXXXX XXX XXXXXXXXX
XXXXXX XX
XXXXXXXXXXXXXXXXXX X X
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X
XX XXXX XXXXXXXX X XXX XX XXX XXX XXXXXX XXXX XXXXXXXX XX XXXXXXXX XXXX XXXXX XX
XXXXXXXXX XXXXXXX XXXX XXX XXX XXXXXXXX XXXX XXXXXXXXXXXXXXXXX
XXX XXXXXXXXX XXXXXXX XXXXX XXX XX XXXXXXX XXXX XXXXXXXXXXXX XXXX XXXX XXXXXXXX XXX
XXXXXXXX XXXXX XXXXXXXXX
XXXXXX XX
XXXXXXXXXXXXXXXXXX X X
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X
XXXXX XXXX XXXXX XXX XXXXXXX XXXXXXXXXXXX XX XX XXX X XXXXXXXXX XX XXXXX XX
XXXXXXX XX XXXXXX XXXXXX
XX XXXXX XXXX XX XXXXXXXXX XXX XXXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXX
XXXXXXX XX XXXXXX XXXXXXXX XXXXXX XXXX XXX XXXXXXXXXX XXX XXXXXX XXXXXXXX XX
XXX XXXXXXXX XXX XXXXXXXX
XX XXXXXXXXX
XXX XXXXXX XXX XXXXXX XXXXX XXX XXXXXXX XXXXXXXXXX XX XXXXXXXX XXXXX XXX
XXXXXXX XXX XXXXXXXX XXX XXX XXX XXX XXXX XXX XXXXX XXXXXXXXX XX XXXXXXX X
XXXXXXX XXX XXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX XXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXX XX X XXXX XXXXXXXX XXX XXXX XXXXXX XXXXXXXXXX XX
XXXXXXXXX XXXX XXXXXXXXX XXXX XXXXX XXXX XXXX XX XXXXX XXXXXXX XXXXXXXXXX
XXXXX XXXXXXX XXXXXX XXXX XX XXXXXXXXX XX XXXXXXX XXXX XXXXXXX XXX XXX
XXXXXX XXXXX XXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXXXX XXXX XXX XXXXXXXX
XXXXX XX XXXXXXX XXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXX XXX XXXXXX XXXXXXX XXX XXXXXXXXX XXXXXXX XX XX XX XXXXXXXXX
XXXXXX XX XXXXXX XX XXXX XXXXXXXX XXXXXXXXXXX XXXXX XXXXX XXXXXX XX XXXXXXXXX
XXX XXXXXXXX XX XXXXXXX XXXXXX XXXXXXX XXX XXXXX XXXXXXXXXXX XXX XXX
XXXXXXXXX XXXXXXX XXXX XXXXXXXXXX XXXXXXXXX XXXXX XXXXXXXXXX XXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXX XXXXXXXXX XXXXXXX XXXXX XX XXXXXXXXXXX XXX XX XXX XXXXXXXXX XXX XXXX
XXX XXX XX XXXXXXXXXXX XX XX XXXX XXXXX XXX XXX XXXXXXX X XXXXXX XXXXXXXX XX
XXXXXX XXXX XXXXXXX XX XX XXXXXXXX XX XXXXXXXXX XXX XXXXXXXX XXXX XXXXXX XX XX
XXXXXXXXX XX XXXXXXXXX XX XXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXX XXXXXX X
XXX XX XXXXXXXX XXXXXXXXXX XX XXXX XXX XXXXXXX XXXXX XXXXXXXXXXX XXXXXX XX
XXXXXXXX XXX XX XXXXX XXXXXXX XX X XXXXXXXXXX XXXXXXXXXXX XXXXX XXX XXXXX
XXXXXXXXXXXXXXXX XX XXX XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXX XXXXXXX X XXXX XX XXX XXXXXXX XXXXXXXXX XX XXXX XXXX XXXX
XXXXX XXXXXXXXX XXXXXXXXXXX XX XXXXXXX XXXXXXXXX XXXXXXXXXXXX XXX XXX
XXXXXXX XXX XXXXXX XX XXXXX XXXXXXXXX XXXX XXXXX XXXXXXXX XX XXXXX XXXXXXX
XXXXX XXX XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXX
XXX XXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXX
XXXX X XXXXXXXXX
XXX
XX XXX XXXXX XXXXXXXX XXX XXXXXX XXX XXX XXXXXXXXX XXXXXX XXX XXXXXX
XXXXXXXXX XXXX XX XXXXXX XXX XXXXXXXX XXXX XXXXX XXXXXXXXXXXXXXXX
XX XXX XXXXX XXXXXXXX XXXXXXX XXX XXXXX XX XXX XXXXXXXX XXXXXXXX XXXX XX
XXXXXXXXXX
XX XXXXXXXXXXXXXX XXXX XXX XXXXX XXXXXXXXX XX X XXXXXXXX XXXX XXXXX XXXXX
XX XXX XXXXXXX XXX XXXXXXXX XX XXX XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXX
XXX
XX XXXXXXXXXXXX XXXX XXXXX XXXXXXXX XXXXXXXXXX
XX XXX XXXXXXXX XXX XXXX XX XXXX XX XXXX X XXXXXXXX XXXXXXXX XXXXX
XXXXXXXXXX XX XXX XXXXXXXXX XXXXXXXXX XXX XX XXX XXXXXXXXX XXXXXXXX XXX
XXXXXXXX XXXXXXXXXXX XXXX XXXX XXXX XX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX
XX XXX XXX XX XXX XXXXXXXXX XXXXXX XXXX XXX XX XXXX XXXX XXXX XXX
XXXXXXXX XXXXXXXX XX XX XXXX XXXXXX XXXXXXX XXX XXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXX
XXX
XX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXX XX XXXX XXXXX XXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX
XXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX XX
XXXXXXX XXXXXXXXX XXXXXXXXXXXX XXX XXX XXXXXXX XXX XXXXXX XX XXXXX
XXXXXXXXXX XXXX XXXXX XXXXXXXX XX XXX XXXXX XXXXXXX XXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
X
XXX
XX XXX XXXXX XXXXXXXX XXX XXXXXX XXX XXX XXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXXXXXX XXXX XX XXXXXX XXX XXXXXXXX XXXX
XXXXX XXXXXXXXXXXXXXXX XX XXX XXXXXXXXX XXXXXXXXXXXXXX XXXXXX XXX
XXXXX XXXXXXXX XXXXXXX XXX XXXXX XX XXX XXXXXXXX XXXXXXXXX XXXX XX
XXXXXXXXXX
XX XXXXXXXXXXXXXX XXXX XXX XXXX XXXXXXXXXX XX X XXXXXXX XX XXXXX XXXXXXXX
XX XXX XXXXXXX XXX XXXXXXXX XX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX
XXX
XXX XXXX XXXXXXXXXX XXX XXXXXXXXXXXXXX XXXXXXXX XXX XX XXXXX XXXXXXX XXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXX XX XXX XXXXXXXX XXXXXX XX XXXXXXX XXX XXXXXXX XX
XXXXXXXXX XXXXXXXXXXX XXXX XX XXXX XXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXX
XXXXXXXXXXXXX XXXXXXXX XXX XXXXXXXXX XXXXXXXX XXXX XXX XXXXX XX XXXXXXXXX
XXXXXXXXX XXX XXXXXXXX XXX XXXXXXXXXXXXXXX XXXXXX XXXX XXX XXXXX XX XXXXXXXXX
XXXX XXXXXXXXXXX XX XXXX XXXX XXXXXXXXX XXXXXXXXXXX XXX XXXXX XX XXXXXXXX XXX
XX XXXXXXX XXXX XX XXXXX XXXXXXX XXX XXXXXX XXXXXXXXX XX XXXX XX XXXXXXXX
XXXXXXX XXXXX XXXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXX XXXX XXX
XXXXXXXXXXX XXXXXXXXXXX XX XXXXXXX XXX XXXXXXXXXXXXX XXXXXX XXXX XXXXX
XXXXXXXXXXXXXXXX XXXX XXX XXXXX XXXXXXX XXX XXXXXXXXX XX XXX XXXX XX
XXXXXXXX XX XXXXXXXXX XXXX XXXXXXX XXXXXXXX XXX XXXX XXXXXX XXXXX XXX XXXX XX
XXXXXX XXXX XXX XXXXXX XXXXX XXX XXXX XXXXXX XX XXX XX XXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXX XXXX XXXXXXX XX X XXXX XXXXXXXX XXX XXXXX XXXXXX XX XXX XXXXXX XXX
XXXXX XXXX XX XXXXXXX XXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX
XX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXX XXXXXX XXXXXX XXXXX XXXXX XX XXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXX XXXXXXXX XXX
XXXXXXXXX XXXXXXXXXX XXX XXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXX XXX
XXX XXXXXX XXXXX XX XXXXXXX XXXXXXXXX XXXXX XXXXX XX XXXXXXX XX
XXXXXXXX XXX XXXXXX XX XXXXXXXXX XXXXXXXXX XXXX XXXXX
XXXXXXXXXXXXXXXXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXX XXX
X XXXXXXXX XXXXXXX XXXXXXXXXX XXXXXX XXXX XX XXXXX XXXXXXXX XXX
XXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXX XX XXXXXXXXXX XX XXXXXXX XXXXXXXXXX
XXXX
XXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXX XX XXXXXXXX XXX XXXXXXXXX | |
from __future__ import annotations
import copy
import dataclasses
import functools
import getpass
import json
import logging
import logging.handlers
import os
import platform
import queue as queue_module
import socket
import sys
import typing
import warnings
from .utils import get_fully_qualified_domain_name
# The special logger:
logger = logging.getLogger('pcds-logging')
# Do not propagate messages to the root logger:
logger.propagate = False
# Exceptions that should just be ignored entirely:
NO_LOG_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
# Central log server destination and transport protocol; each may be
# overridden via the corresponding PCDS_LOG_* environment variable.
DEFAULT_LOG_HOST = os.environ.get('PCDS_LOG_HOST', 'ctl-logsrv01.pcdsn')
DEFAULT_LOG_PORT = int(os.environ.get('PCDS_LOG_PORT', 54320))
DEFAULT_LOG_PROTO = os.environ.get('PCDS_LOG_PROTO', 'tcp')
# Space-separated domain suffixes on which centralized logging is allowed
# (consumed by centralized_logging_enabled).
ALLOWED_LOG_DOMAINS = set(
    os.environ.get("PCDS_LOG_DOMAINS", ".pcdsn .slac.stanford.edu").split(" ")
)
# Version tag embedded in each emitted record's 'schema' field.
_LOGGER_SCHEMA_VERSION = 0
# LogRecord attributes (plus our schema's own additions) that survive
# serialization; everything else is stripped before sending.
_LOGGER_ALLOWED_KEYS = {
    # 'args',
    # 'created',
    # 'exc_info',
    'exc_text',
    'filename',
    # 'funcName',
    # 'levelname',
    # 'levelno',
    'lineno',
    # 'message',
    # 'module',
    # 'msecs',
    'msg',
    # 'name',
    'pathname',
    # 'process',
    'processName',
    # 'relativeCreated',
    # 'stack_info',
    # 'thread',
    'threadName',
    # Ones our schema specifies:
    'schema',
    'source',
    'versions',
    'hostname',
    'username',
    'host_info',
}
# LogRecord attribute names translated to the schema's field names.
_LOGGER_KEY_RENAMES = {
    'created': 'ts',  # created time -> timestamp (ts)
    'levelname': 'severity',
    'processName': 'process_name',
    'threadName': 'thread_name',
}
# Static host description, captured once at import time.
_SYSTEM_UNAME_DICT = dict(platform.uname()._asdict())
# The handler installed by the most recent configure_pcds_logging() call.
_CURRENT_HANDLER = None
class _PassthroughStreamHandler(logging.handlers.SocketHandler):
def makePickle(self, record):
'Overrides super().makePickle'
return record.encode('utf-8') + b'\n'
class _PassthroughDatagramHandler(logging.handlers.DatagramHandler):
def makePickle(self, record):
'Overrides super().makePickle'
return record.encode('utf-8')
class _LogQueueListener(logging.handlers.QueueListener):
    """A log handler which listens in a separate thread for queued records.

    Subclassed purely for a project-specific name; all behavior comes from
    :class:`logging.handlers.QueueListener`.
    """
def _get_module_versions():
    """Yield ``(module_name, module_version)`` tuples for loaded modules.

    Only modules that expose ``__version__`` are reported.  Dots in module
    names are replaced by underscores so the names are safe as flat keys in
    the serialized record.
    """
    def _normalize(version):
        # _curses, for example, stores its version as bytes.
        if isinstance(version, bytes):
            return version.decode('utf-8')
        # Some modules incorrectly specify version as a tuple, e.g. (1, 2).
        if not isinstance(version, str):
            return '.'.join(str(piece) for piece in version)
        return version

    for mod_name, mod in sys.modules.items():
        if not hasattr(mod, '__version__'):
            continue
        try:
            version = _normalize(mod.__version__)
        except Exception:
            # Whatever it is, fall back to its repr rather than failing.
            version = repr(mod.__version__)
        yield mod_name.replace('.', '_'), version
def _get_module_version_dict():
    """Return a module version mapping: ``{module_name: module_version}``."""
    return {name: version for name, version in _get_module_versions()}
def create_log_dictionary_from_record(record: logging.LogRecord) -> dict:
    '''
    Create a PCDS logging-compliant dictionary from a given logging.LogRecord

    Ensure that exceptions have been formatted with `logging.Handler` prior to
    calling this function.

    This sits on the logging path, so it must never raise: every probe of the
    environment is wrapped in a fail-safe, and missing keys (possible when a
    plain dict is passed in) are tolerated.

    Parameters
    ----------
    record : logging.LogRecord or dict
        The record to interpret

    Returns
    -------
    dict
        The ready-to-be-JSON'd record dictionary
    '''
    # Shallow-copy the record dictionary
    ret = dict(record if isinstance(record, dict) else vars(record))
    ret['schema'] = f'python-event-{_LOGGER_SCHEMA_VERSION}'

    def failsafe_call(func, *args, value_on_failure=None, **kwargs):
        # Never raise: on error, substitute the caller-provided fallback or
        # a short description of the failure.
        try:
            return func(*args, **kwargs)
        except Exception as ex:
            if value_on_failure is None:
                return f'FAILURE: {type(ex).__name__}: {ex}'
            return value_on_failure

    ret['source'] = failsafe_call('{module}.{funcName}:{lineno}'.format, **ret)
    ret['versions'] = failsafe_call(_get_module_version_dict)
    # The lambda keeps the ret['pathname'] lookup itself inside the failsafe,
    # since dict inputs may omit 'pathname'.
    ret['pathname'] = str(failsafe_call(lambda: os.path.abspath(ret['pathname'])))
    ret['hostname'] = failsafe_call(socket.gethostname)
    ret['host_info'] = _SYSTEM_UNAME_DICT
    # getpass.getuser() can raise (e.g. LOGNAME/USER unset in the
    # environment); wrap it like the other environment probes.
    ret['username'] = failsafe_call(getpass.getuser)

    for from_, to in _LOGGER_KEY_RENAMES.items():
        # Rename only when present: dict inputs may omit standard LogRecord
        # keys, and a missing key must not abort logging with a KeyError.
        if from_ in ret:
            ret[to] = ret.pop(from_)

    # Strip everything our schema does not allow.
    other_keys = set(ret) - _LOGGER_ALLOWED_KEYS
    for key in other_keys:
        ret.pop(key)

    return ret
class _JsonLogQueueHandler(logging.handlers.QueueHandler):
    """Logging handler which pushes `logging.LogRecord`s to a separate thread.

    Records are serialized to JSON in prepare() and placed on an internal
    queue; a `_LogQueueListener` thread pops them and forwards the JSON
    payload to the downstream ``handlers``.
    """

    def __init__(self, *handlers, queue=None):
        super().__init__(queue or queue_module.Queue())
        self.listener = _LogQueueListener(self.queue)
        self.listener.handlers = list(handlers)
        self.listener.start()

    def prepare(self, record):
        """Overrides QueueHandler.prepare: serialize the record to JSON."""
        # Work on a copy -- mutating the record in-place would affect any
        # other handlers attached to the same logger.
        record = copy.copy(record)
        if record.exc_info:
            # Calling format() has the side effect of rendering the
            # traceback into record.exc_text.
            self.format(record)
        # The serialized JSON string is what downstream handlers (at least
        # `self.listener`) receive.
        return json.dumps(create_log_dictionary_from_record(record))
def configure_pcds_logging(
        file=sys.stdout, *,
        log_host=DEFAULT_LOG_HOST, log_port=DEFAULT_LOG_PORT,
        protocol=DEFAULT_LOG_PROTO,
        level='DEBUG'):
    """
    Set a new handler on the ``logging.getLogger('pcds-logging')`` logger.

    If this is called more than once, the handler from the previous invocation
    is removed (if still present) and replaced, and its listener thread is
    stopped.

    Parameters
    ----------
    file : file-like, optional
        Currently unused; retained for backward compatibility of the call
        signature.
    log_host : str, optional
        The log host server host. Defaults to the environment variable
        PCDS_LOG_HOST.
    log_port : int, optional
        The log host server port. Defaults to the environment variable
        PCDS_LOG_PORT.
    protocol : {'tcp', 'udp'}
        Use UDP or TCP as the transport protocol. Defaults to the environment
        variable PCDS_LOG_PROTO.
    level : str or int
        Minimum logging level, given as string or corresponding integer.
        Default is 'DEBUG'.

    Returns
    -------
    handler : logging.Handler
        The handler, which has already been added to the 'pcds-logging' logger.
    """
    global _CURRENT_HANDLER

    handler_cls = {
        'udp': _PassthroughDatagramHandler,
        'tcp': _PassthroughStreamHandler
    }[protocol.lower()]
    socket_handler = handler_cls(log_host, log_port)
    handler = _JsonLogQueueHandler(socket_handler)

    levelno = validate_log_level(level)
    handler.setLevel(levelno)

    if _CURRENT_HANDLER is not None:
        if _CURRENT_HANDLER in logger.handlers:
            logger.removeHandler(_CURRENT_HANDLER)
        # Stop the old listener thread even if the handler was already
        # detached from the logger by someone else; otherwise the thread
        # (started in _JsonLogQueueHandler.__init__) is leaked.
        _CURRENT_HANDLER.listener.stop()

    logger.addHandler(handler)
    _CURRENT_HANDLER = handler

    # Make sure the logger itself is permissive enough to reach the handler.
    if logger.getEffectiveLevel() > levelno:
        logger.setLevel(levelno)
    return handler
def validate_log_level(level: typing.Union[str, int]) -> int:
    """
    Return a logging level integer for level comparison.

    Parameters
    ----------
    level : str or int
        The logging level string or integer value.

    Returns
    -------
    log_level : int
        The integral log level.

    Raises
    ------
    TypeError
        If ``level`` is neither a string nor an integer.
    ValueError
        If the logging level string is invalid.
    """
    if isinstance(level, int):
        # Integers pass straight through unchecked, matching the behavior
        # of the logging module itself.
        return level
    if not isinstance(level, str):
        raise TypeError(
            f"Invalid type {type(level)} of argument level. "
            "Must be of type int or str."
        )
    # getLevelName() maps a known level name to its number, but maps an
    # *unknown* name to the string "Level <name>" -- hence the isinstance
    # check below to detect failure.
    levelno = logging.getLevelName(level)
    if not isinstance(levelno, int):
        raise ValueError(
            f"Invalid logging level {levelno!r} (use e.g., DEBUG or 6)"
        )
    return levelno
def get_handler() -> typing.Optional[logging.Handler]:
    """
    Return the handler configured by the most recent call to
    :func:`configure_pcds_logging`.

    If :func:`configure_pcds_logging` has not yet been called, this returns
    ``None``.
    """
    return _CURRENT_HANDLER
def log_exception(
    exc_info,
    *,
    context='exception',
    message=None,
    level=logging.ERROR,
    stacklevel=1,
):
    """
    Log an exception to the central server (i.e., logstash/grafana).

    Parameters
    ----------
    exc_info : (exc_type, exc_value, exc_traceback)
        The exception information.
    context : str, optional
        Additional context for the log message.
    message : str, optional
        Override the default log message.
    level : int, optional
        The log level to use. Defaults to ERROR.
    stacklevel : int, optional
        The stack level of the message being reported. Defaults to 1, meaning
        that the message will be reported as having come from the caller of
        ``log_exception_to_central_server``. Applies only to Python 3.8+, and
        ignored below.
    """
    exc_type, exc_value, _ = exc_info
    # Interpreter-control exceptions are deliberately never logged.
    if issubclass(exc_type, NO_LOG_EXCEPTIONS):
        return
    if not logger.handlers:
        # Do not allow log messages unless the central logger has been
        # configured with a log handler. Otherwise, the log message will hit
        # the default handler and output to the terminal.
        return

    if not message:
        message = f'[{context}] {exc_value}'

    kwargs = {}
    if sys.version_info >= (3, 8):
        # Attribute the message to our caller rather than to this helper.
        kwargs['stacklevel'] = stacklevel + 1
    logger.log(level, message, exc_info=exc_info, **kwargs)
def centralized_logging_enabled() -> bool:
    """Returns True if centralized logging should be enabled.

    Enabled when this host's fully-qualified domain name ends with any of
    the suffixes listed in ``ALLOWED_LOG_DOMAINS``.
    """
    fqdn = get_fully_qualified_domain_name()
    # str.endswith accepts a tuple of suffixes: one call covers them all.
    return fqdn.endswith(tuple(ALLOWED_LOG_DOMAINS))
# Dedicated child logger that receives warnings redirected by
# log_warning_handler (its default ``logger`` argument).
warnings_logger = logging.getLogger(f'{__name__}.warnings')
def log_warning_handler(
    message: Warning,
    category: type[Warning],
    filename: str,
    lineno: int,
    file: typing.Optional[typing.TextIO] = None,
    line: typing.Optional[str] = None,
    logger: logging.Logger = warnings_logger,
) -> None:
    """
    Warning handler that redirects all of the warnings to a logger.

    A drop-in replacement for ``warnings.showwarning`` that routes
    unfiltered warnings into the logging stream.

    The warnings module would display a warning as::

        filename:lineno: category: message\\nline

    The log message generated here is deliberately simpler::

        category: message

    and the remaining details are carried in the record's "extra"
    dictionary instead, so log filters can use them without parsing the
    message text. Each keyword below appears under the key
    ``warning_<name>`` to avoid collisions with standard record attributes.

    Parameters
    ----------
    message : Warning
        The Warning object created by a ``warnings.warn`` call; its str()
        form is the message text. Included in the log text and extra dict.
    category : type[Warning]
        The warning type (UserWarning, DeprecationWarning, ...). Included
        in the log text and extra dict.
    filename : str
        Source file that generated the warning (extra dict only).
    lineno : int
        Line number that generated the warning (extra dict only).
    file : file-like, optional
        The destination stream the warnings machinery would have used,
        normally sys.stderr (extra dict only).
    line : str, optional
        The source line that generated the warning, when provided by the
        warnings machinery (extra dict only).
    logger : Logger, optional
        Override the destination logger; defaults to this module's
        ``warnings_logger``.
    """
    extra = {
        'warning_message': message,
        'warning_category': category,
        'warning_filename': filename,
        'warning_lineno': lineno,
        'warning_file': file,
        'warning_line': line,
    }
    logger.warning('%s: %s', category.__name__, message, extra=extra)
def | |
stopping current game.**")
await ctx.send(
f"**Starting a game of Uno with {len(game.getPlayerList())} players. " +
"Input `hand` to see your hand of cards, `time` to see total elapsed time, or " +
"`quit` to quit the game (Note: You may only quit when it is your turn). " +
"Remember to say `uno` when you only have one card left!**"
)
game.startGame()
drawn, playable, wildCard, lastCard = [], False, False, False
count = 1
for player in game.getPlayerList():
self.client.loop.create_task(self.unoAwaitInput(ctx, game, player))
hand = game.getHand(player)
msg = f"`{', '.join(card for card in hand)}` ({len(hand)} left)"
try:
await player.send(f"**== Uno ==**\n\nWelcome to Uno. You are **Player {str(count)}**." +
f" Active channel: <#{ctx.channel.id}>\n\nYour hand: {msg}\n\n" +
"**Remember to say `uno` as soon as you play your second to last card!**")
except:
self.gameChannels.remove(ctx.channel.id)
return await ctx.send("**Someone has DMs disabled; stopping current game.**")
count += 1
logo = "https://media.discordapp.net/attachments/668552771120791563/775240814636171324/logo.png"
await ctx.send(f"`Say the name of a card in your hand to play it. " +
"Say 'draw' to draw a card. Inputs are case insensitive, and you may " +
"enter the first letter of the card colour and card value (e.g. r 5).`")
while not game.getGameEndBool():
if len(game.getPlayerList()) < 2:
break
try:
playerCards = ""
discard = game.getDiscardPileCard()
colour = self.unoEmbedColour(discard)
e = Embed(
title="Uno",
description=f"**{game.getCurrentPlayer().name}'s turn!**",
colour=colour
)
file = File(
funcs.PATH + funcs.getResource(self.name, "uno_cards/") +
f"{'Xmas_' if datetime.now().month == 12 else ''}{discard.replace(' ', '_')}.png",
filename="card.png"
)
e.set_image(url="attachment://card.png")
e.set_thumbnail(url=logo)
for player in game.getPlayerList():
playerCards += f"{player.name} - {len(game.getHand(player))}\n"
e.add_field(name="Total Player Cards", value=f"```{playerCards[:-1]}```")
e.add_field(name="Discard Pile Card", value=f"`{game.getDiscardPileCard()}`")
await ctx.send(embed=e, file=file)
while True:
currentPlayer = game.getCurrentPlayer()
try:
waitForInput = await self.client.wait_for(
"message", check=lambda m: m.author == currentPlayer, timeout=120
)
decision = waitForInput.content
if decision.casefold().startswith("d"):
await ctx.send(f"`{currentPlayer.name} has drawn a card.`")
drawn, affectedPlayer, playable, wildCard = game.drawCard()
elif decision.casefold() == "quit":
await ctx.send(f"`{currentPlayer.name} has left Uno.`")
game.removePlayer(currentPlayer)
if len(game.getPlayerList()) < 2:
await ctx.send("**Not enough players for Uno; stopping current game.**")
break
elif decision.casefold().startswith(("w ", "wild")) or decision.casefold() == "w":
await waitForInput.channel.send("`What colour would you like to use? " +
"Please say the first letter of your preferred colour.`")
try:
waitForColour = await self.client.wait_for(
"message", check=lambda m: m.author == currentPlayer, timeout=120
)
except TimeoutError:
await ctx.send(f"`{currentPlayer.name} has left Uno for idling for too long.`")
game.removePlayer(currentPlayer)
break
colour = waitForColour.content
drawn, affectedPlayer, lastCard = game.playWildCard(colour, "+4" in decision)
await ctx.send(f"`{waitForInput.author.name} has played a wild card.`")
elif not decision.casefold().startswith(("b", "g", "r", "y")):
break
else:
drawn, affectedPlayer, lastCard = game.playColouredCard(decision)
await ctx.send(f"`{waitForInput.author.name} has played a card.`")
if lastCard:
for player in game.getPlayerList():
if player == waitForInput.author:
continue
self.client.loop.create_task(self.unoCallout(ctx, game, player, waitForInput.author))
lastCard = False
if playable:
_ = await self.unoDraw(affectedPlayer, drawn)
await affectedPlayer.send("**== Uno ==**\n\nYour drawn card is playable! Would you like " +
"to play it? Input `y` to play it, or anything else to keep it.")
try:
waitForOption = await self.client.wait_for(
"message", check=lambda m: m.author == affectedPlayer, timeout=120
)
playCard = waitForOption.content
if playCard.casefold().startswith("y"):
await ctx.send(f"`{waitForOption.author.name} has played the drawn card.`")
if wildCard:
await waitForOption.author.send("`What colour do you want? Please say " +
"the first letter of your preferred colour.`")
await ctx.send(f"`{waitForOption.author.name} has played a wild card.`")
try:
waitForColour = await self.client.wait_for(
"message", check=lambda m: m.author == waitForOption.author, timeout=120
)
except TimeoutError:
await ctx.send(f"`{waitForOption.author.name} has left Uno for idling for too long.`")
game.removePlayer(waitForOption.author)
break
colour = waitForColour.content
drawn, affectedPlayer, lastCard = game.playWildCard(colour, "+4" in drawn[0], True)
else:
drawn, affectedPlayer, lastCard = game.playColouredCard(drawn[0], True)
if lastCard:
for player in game.getPlayerList():
if player == waitForOption.author:
continue
self.client.loop.create_task(self.unoCallout(ctx, game, player, waitForOption.author))
lastCard = False
else:
await waitForOption.author.send("**== Uno ==**\n\nYou are keeping the card.")
except TimeoutError:
await affectedPlayer.send(f"**== Uno ==**\n\nYou have been idling for too long. " +
"You will now proceed to keep the card.")
playable, wildCard = False, False
if drawn is None:
await affectedPlayer.send("**== Uno ==**\n\nYou are about to be the victim of a Wild +4 card!" +
" Do you think it may be an illegal move? If so, you may input `y` to " +
"challenge the player.\n```== Wild +4 Challenges ==\n\nAccording to " +
"the official Uno rules, it is considered illegal for a player to use " +
"a Wild +4 card if there are still cards on their hand that match " +
"the colour of the current card on top of the discard pile. Once a Wild" +
" +4 card is played, the victim may choose to challenge the player; if " +
"challenged, the player of the Wild +4 card must then show their hand " +
"of cards to the victim.\n\nIf guilty, the challenged player draws four" +
" cards instead of the accuser as punishment whilst the accuser remains" +
" safe from drawing additional cards. However, if not guilty, then the " +
"accuser must draw a total of six cards.```\nReply with `y` if you " +
"would like to challenge the Wild +4 card play (*You may risk drawing " +
"six cards!*), or any other input to decline.")
try:
waitForOption = await self.client.wait_for(
"message", check=lambda m: m.author == affectedPlayer, timeout=120
)
decision = waitForOption.content
except TimeoutError:
await affectedPlayer.send(f"**== Uno ==**\n\nYou have been idling for too long. " +
"You will now proceed to draw four cards.")
decision = decision.casefold().startswith("y")
if decision:
await ctx.send(f"`{affectedPlayer.name} suspects that the Wild +4 card is being " +
f"played illegally! {currentPlayer.name} will now show their hand " +
f"of cards to {affectedPlayer.name}.`")
msg = f"`{', '.join(card for card in game.getHand(currentPlayer))}` " + \
f"({len(game.getHand(currentPlayer))} left)"
await affectedPlayer.send(f"**== Uno ==**\n\n{currentPlayer.name}'s hand: {msg}")
drawn, affectedPlayer, guilty = game.challengePlayer(decision)
if guilty:
await ctx.send(f"`Uh oh! {affectedPlayer.name} has been caught illegally playing " +
f"the Wild +4 card by {game.getCurrentPlayer().name}! As " +
f"punishment, {affectedPlayer.name} has been forced to draw " +
f"four cards. {game.getCurrentPlayer().name} is " +
"now safe from drawing.`")
if decision and not guilty:
await ctx.send("`It looks like the Wild +4 card has been played legally " +
f"after all. Because of that, {affectedPlayer.name} will now " +
"have to draw a total of six cards! Better luck next time.`")
if drawn:
_ = await self.unoDraw(affectedPlayer, drawn)
break
break
except TimeoutError:
await ctx.send(f"`{currentPlayer.name} has left Uno for idling for too long.`")
game.removePlayer(currentPlayer)
break
except Exception as ex:
error = str(ex)
if error.startswith("not enough values to unpack"):
error = "Invalid card."
await ctx.send(embed=funcs.errorEmbed(None, error))
first, second, third, fourth = game.getPlayerRanking()
m, s = game.getTime()
msg = ""
if first:
msg += f"1st - {first.name}\n"
discard = game.getDiscardPileCard()
colour = self.unoEmbedColour(discard)
e = Embed(title="Uno", description=f"The final card - `{discard}`", colour=colour)
file = File(
funcs.PATH + funcs.getResource(self.name, "uno_cards/") +
f"{'Xmas_' if datetime.now().month == 12 else ''}{discard.replace(' ', '_')}.png",
filename="card.png"
)
e.set_image(url="attachment://card.png")
e.set_thumbnail(url=logo)
await ctx.send(embed=e, file=file)
if second:
msg += f"2nd - {second.name}\n"
if third:
msg += f"3rd - {third.name}\n"
if fourth:
msg += f"4th - {fourth.name}\n"
if first:
await ctx.send(f"```== Uno ==\n\n{msg}\nThanks for playing!```")
await funcs.sendTime(ctx, m, s)
self.gameChannels.remove(ctx.channel.id)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="akinator", description="Play Akinator.", aliases=["ak", "akin", "aki"])
async def akinator(self, ctx):
if await self.checkGameInChannel(ctx):
return
self.gameChannels.append(ctx.channel.id)
akimage = "https://i.pinimg.com/originals/02/e3/02/02e3021cfd7210e2ebd2faac8ce289ba.png"
await ctx.send("Starting Akinator instance...")
try:
aki = Akinator()
game = await aki.start_game()
while aki.progression <= 80:
try:
await ctx.send(embed=Embed(title="Akinator", description=game).set_image(url=akimage).set_footer(
text=f"Progress: {round(aki.progression / 80 * 100, 2)}%\nRequested by: {ctx.author}"))
resp = await self.client.wait_for(
"message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel,
timeout=60
)
except TimeoutError:
self.gameChannels.remove(ctx.channel.id)
return await ctx.send(f"`{ctx.author.name} has left Akinator for idling for too long.`")
if resp.content.casefold() == "b":
try:
game = await aki.back()
except CantGoBackAnyFurther:
await ctx.send(embed=funcs.errorEmbed(None, "Cannot go back any further."))
elif resp.content.casefold().startswith("q"):
self.gameChannels.remove(ctx.channel.id)
return await ctx.send(f"`{ctx.author.name} has left Akinator.`")
else:
try:
game = await aki.answer(resp.content)
except:
await ctx.send(embed=funcs.errorEmbed("Invalid answer!",
"Valid options:\n\n`y` or `yes` for yes;\n`n` or `no` for no;\n" +
"`i` or `idk` for I don't know;\n`p` or `probably` for probably;\n" +
"`pn` or `probably not` for probably not;\n`b` for back;\n`q` or `quit` to quit the game."))
await aki.win()
e | |
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Collection,
Mapping,
NoReturn,
Optional,
Sequence,
Union,
)
from ..damlast.daml_lf_1 import TypeConName
from ..damlast.lookup import parse_type_con_name
from ..prim import ContractData, ContractId, Party
from ..util.typing import safe_cast
# Explicit public API of this module; anything not listed here is an
# implementation detail and may change without notice.
__all__ = [
    "ArchiveEvent",
    "Boundary",
    "Command",
    "CreateAndExerciseCommand",
    "CreateCommand",
    "CreateEvent",
    "Event",
    "EventOrBoundary",
    "ExerciseByKeyCommand",
    "ExerciseCommand",
    "ExerciseResponse",
    "PartyInfo",
    "SubmitResponse",
]
class Command:
    """
    Common base type for all write-side ledger commands.

    It carries no data of its own; its only job is to make every command
    object immutable by rejecting attribute assignment.
    """

    def __setattr__(self, key, value) -> NoReturn:
        """
        Always raise :class:`AttributeError`: command objects are read-only.
        """
        raise AttributeError("Command instances are read-only")
class CreateCommand(Command):
    """
    A command that creates a contract without any predecessors.
    """

    __slots__ = ("_template_id", "_payload")
    if TYPE_CHECKING:
        _template_id: TypeConName
        _payload: ContractData

    def __init__(self, template_id: Union[str, TypeConName], payload: ContractData):
        """
        Initialize a :class:`CreateCommand`.

        :param template_id:
            The template of the contract to be created.
        :param payload:
            The template arguments for the contract to be created.
        """
        # object.__setattr__ is required because Command.__setattr__ raises to
        # keep command instances immutable.
        object.__setattr__(self, "_template_id", validate_template_id(template_id))
        object.__setattr__(self, "_payload", payload)

    @property
    def template_id(self) -> TypeConName:
        """
        Return the template of the contract to be created.
        """
        return self._template_id

    # Annotated as ContractData (rather than the looser Mapping[str, Any]) for
    # consistency with the sibling command classes; the value is passed through
    # unchanged from __init__.
    @property
    def payload(self) -> ContractData:
        """
        Return the template arguments for the contract to be created.
        """
        return self._payload

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, CreateCommand)
            and self.template_id == other.template_id
            and self.payload == other.payload
        )

    def __repr__(self) -> str:
        return f"CreateCommand({self.template_id}, {self.payload})"
class CreateAndExerciseCommand(Command):
    """
    A command that, in one atomic transaction, creates a contract and then
    exercises a choice on it.

    Rather than building this command and passing it to
    :meth:`Connection.submit`, consider :meth:`Connection.create_and_exercise`,
    which additionally returns the result of the exercised choice.
    """

    __slots__ = ("_template_id", "_payload", "_choice", "_argument")
    if TYPE_CHECKING:
        _template_id: TypeConName
        _payload: ContractData
        _choice: str
        _argument: Any

    def __init__(
        self,
        template_id: Union[str, TypeConName],
        payload: ContractData,
        choice: str,
        argument: Optional[Any] = None,
    ):
        """
        Initialize a :class:`CreateAndExerciseCommand`.

        :param template_id: The template of the contract to be created.
        :param payload: The template arguments for the contract to be created.
        :param choice: The choice to exercise.
        :param argument: The choice arguments; omit for argumentless choices.
        """
        # A missing argument is normalized to an empty dict so _argument is
        # always a dict. object.__setattr__ bypasses the immutability guard
        # installed by Command.__setattr__.
        normalized = {} if argument is None else dict(argument)
        object.__setattr__(self, "_template_id", validate_template_id(template_id))
        object.__setattr__(self, "_payload", payload)
        object.__setattr__(self, "_choice", choice)
        object.__setattr__(self, "_argument", normalized)

    @property
    def template_id(self) -> TypeConName:
        """
        The template of the contract to be created.
        """
        return self._template_id

    @property
    def payload(self) -> ContractData:
        """
        The template arguments for the contract to be created.
        """
        return self._payload

    @property
    def choice(self) -> str:
        """
        The choice to exercise.
        """
        return self._choice

    @property
    def argument(self) -> Any:
        """
        The choice arguments.
        """
        return self._argument

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, CreateAndExerciseCommand):
            return False
        return (self.template_id, self.payload, self.choice, self.argument) == (
            other.template_id,
            other.payload,
            other.choice,
            other.argument,
        )

    def __repr__(self) -> str:
        return f"CreateAndExerciseCommand({self.template_id}, {self.payload}, {self.choice!r}, {self.argument})"
class ExerciseCommand(Command):
    """
    A command that exercises a choice on a contract identified by its contract ID.
    Instead of creating an instance of this command and submitting it with :meth:`Connection.submit`,
    consider using :meth:`Connection.exercise` instead, which also gives you access to the result of
    exercising the choice.
    """

    __slots__ = ("_choice", "_contract_id", "_argument")
    if TYPE_CHECKING:
        _choice: str
        _contract_id: ContractId
        # Always a dict after __init__ (None is normalized to {}), matching
        # CreateAndExerciseCommand._argument.
        _argument: Any

    def __init__(self, contract_id: ContractId, choice: str, argument: Optional[Any] = None):
        """
        Initialize an :class:`ExerciseCommand`.

        :param contract_id:
            The contract ID of the contract to exercise.
        :param choice:
            The choice to exercise.
        :param argument:
            The choice arguments. Can be omitted for choices that take no arguments.
        """
        # safe_cast performs a runtime type check; object.__setattr__ bypasses
        # the immutability guard installed by Command.__setattr__.
        object.__setattr__(self, "_choice", safe_cast(str, choice))
        object.__setattr__(self, "_contract_id", safe_cast(ContractId, contract_id))
        object.__setattr__(self, "_argument", dict(argument) if argument is not None else dict())

    @property
    def contract_id(self) -> ContractId:
        """
        The contract ID of the contract to exercise.
        """
        return self._contract_id

    @property
    def choice(self) -> str:
        """
        The choice to exercise.
        """
        return self._choice

    @property
    def argument(self) -> Any:
        """
        The choice arguments.
        """
        return self._argument

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, ExerciseCommand)
            and self.choice == other.choice
            and self.contract_id == other.contract_id
            and self.argument == other.argument
        )

    def __repr__(self) -> str:
        return f"ExerciseCommand({self.choice!r}, {self.contract_id}, {self.argument})"
class ExerciseByKeyCommand(Command):
    """
    A command that exercises a choice on a contract identified by its contract key.
    Instead of creating an instance of this command and submitting it with :meth:`Connection.submit`,
    consider using :meth:`Connection.exercise_by_key` instead, which also gives you access to the
    result of exercising the choice.
    """

    __slots__ = ("_template_id", "_key", "_choice", "_argument")
    if TYPE_CHECKING:
        _template_id: TypeConName
        _key: Any
        _choice: str
        # Always a dict after __init__ (None is normalized to {}), matching
        # CreateAndExerciseCommand._argument.
        _argument: Any

    def __init__(
        self,
        template_id: Union[str, TypeConName],
        key: Any,
        choice: str,
        argument: Optional[Any] = None,
    ):
        """
        Initialize an :class:`ExerciseByKeyCommand`.

        :param template_id:
            The contract template type.
        :param key:
            The contract key of the contract to exercise.
        :param choice:
            The choice to exercise.
        :param argument:
            The choice arguments. Can be omitted for choices that take no arguments.
        """
        # object.__setattr__ bypasses the immutability guard installed by
        # Command.__setattr__.
        object.__setattr__(self, "_template_id", validate_template_id(template_id))
        object.__setattr__(self, "_key", key)
        object.__setattr__(self, "_choice", choice)
        object.__setattr__(self, "_argument", dict(argument) if argument is not None else dict())

    @property
    def template_id(self) -> TypeConName:
        """
        The contract template type.
        """
        return self._template_id

    @property
    def key(self) -> Any:
        """
        The contract key of the contract to exercise.
        """
        return self._key

    @property
    def choice(self) -> str:
        """
        The choice to exercise.
        """
        return self._choice

    @property
    def argument(self) -> Any:
        """
        The choice arguments.
        """
        return self._argument

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, ExerciseByKeyCommand)
            and self.template_id == other.template_id
            and self.key == other.key
            and self.choice == other.choice
            and self.argument == other.argument
        )

    def __repr__(self) -> str:
        return f"ExerciseByKeyCommand({self.template_id}, {self.key}, {self.choice!r}, {self.argument})"
class CreateEvent:
    """
    Notification that a contract was newly created on the ledger.
    """

    __slots__ = (
        "_contract_id",
        "_payload",
        "_signatories",
        "_observers",
        "_agreement_text",
        "_key",
    )
    if TYPE_CHECKING:
        _contract_id: ContractId
        _payload: ContractData
        _signatories: AbstractSet[Party]
        _observers: AbstractSet[Party]
        _agreement_text: Optional[str]
        _key: Optional[Any]

    def __init__(
        self,
        contract_id: ContractId,
        payload: ContractData,
        signatories: Collection[Party],
        observers: Collection[Party],
        agreement_text: Optional[str],
        key: Optional[Any],
    ):
        # Party collections are snapshotted into frozensets so the event is
        # order-insensitive in __eq__ and safe to share.
        object.__setattr__(self, "_contract_id", contract_id)
        object.__setattr__(self, "_payload", payload)
        object.__setattr__(self, "_signatories", frozenset(signatories))
        object.__setattr__(self, "_observers", frozenset(observers))
        object.__setattr__(self, "_agreement_text", agreement_text)
        object.__setattr__(self, "_key", key)

    @property
    def contract_id(self) -> ContractId:
        """
        The ID of the created contract.
        """
        return self._contract_id

    @property
    def payload(self) -> ContractData:
        """
        The `parameters <https://docs.daml.com/daml/reference/templates.html#template-parameters>`_
        used to instantiate the contract.
        """
        return self._payload

    @property
    def signatories(self) -> AbstractSet[Party]:
        """
        The `signatories <https://docs.daml.com/daml/reference/templates.html#signatory-parties>`_
        of this contract, as declared by its template.
        """
        return self._signatories

    @property
    def observers(self) -> AbstractSet[Party]:
        """
        The `observers <https://docs.daml.com/daml/reference/templates.html#observers>`_ of this
        contract, declared explicitly by the template or implied by choice controllers.
        """
        return self._observers

    @property
    def agreement_text(self) -> Optional[str]:
        """
        The contract's
        `agreement text <https://docs.daml.com/daml/reference/templates.html#agreements>`_, if any.
        """
        return self._agreement_text

    @property
    def key(self) -> Optional[Any]:
        """
        The contract's
        `key <https://docs.daml.com/daml/reference/templates.html#contract-keys-and-maintainers>`_,
        or ``None`` if the template defines no key.
        """
        return self._key

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, CreateEvent):
            return False
        return (
            self.contract_id,
            self.payload,
            self.signatories,
            self.observers,
            self.agreement_text,
            self.key,
        ) == (
            other.contract_id,
            other.payload,
            other.signatories,
            other.observers,
            other.agreement_text,
            other.key,
        )
class ArchiveEvent:
    """
    Notification that a contract was archived (removed from the active set).
    """

    __slots__ = ("_contract_id",)
    if TYPE_CHECKING:
        _contract_id: ContractId

    def __init__(self, contract_id: ContractId):
        # object.__setattr__ keeps symmetry with the other event types, which
        # also write their slots this way.
        object.__setattr__(self, "_contract_id", contract_id)

    @property
    def contract_id(self) -> ContractId:
        """
        The contract ID of the archived contract.
        """
        return self._contract_id
# A ledger event notification: either a contract creation or an archival.
Event = Union[CreateEvent, ArchiveEvent]
class Boundary:
    """
    Marker in a query stream at which reading events can later be resumed.
    """

    __slots__ = ("_offset",)
    if TYPE_CHECKING:
        _offset: Optional[str]

    def __init__(self, offset: Optional[str]):
        object.__setattr__(self, "_offset", offset)

    @property
    def offset(self) -> Optional[str]:
        """
        The ledger offset at which this boundary occurred.

        ``None`` means an active contract set was requested but the ledger
        holds no data at all.
        """
        return self._offset

    def __eq__(self, other: Any) -> bool:
        return self.offset == other.offset if isinstance(other, Boundary) else False

    def __hash__(self):
        # Hash mirrors equality: two boundaries with equal offsets collide.
        return hash(self._offset)

    def __repr__(self):
        return f"Boundary({self._offset!r})"
# An item yielded by a query stream: either a ledger event or a resumption boundary.
EventOrBoundary = Union[Event, Boundary]
class ExerciseResponse:
"""
Returned when directly exercising a choice using :meth:`Connection.create_and_exercise`,
:meth:`Connection.exercise`, or :meth:`Connection.exercise_by_key`.
"""
__slots__ = ("_result", "_events")
if TYPE_CHECKING:
_result: Optional[Any]
_events: Sequence[Union[CreateEvent, ArchiveEvent]]
def __init__(self, result: Optional[Any], events: Sequence[Union[CreateEvent, ArchiveEvent]]):
object.__setattr__(self, "_result", result)
object.__setattr__(self, "_events", tuple(events))
@property
def result(self) -> Optional[Any]:
"""
The return value of the choice.
"""
return self._result
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.clouddms_v1.types import clouddms_resources
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# proto-plus module descriptor: registers every message class declared in this
# file under the google.cloud.clouddms.v1 proto package.
__protobuf__ = proto.module(
    package="google.cloud.clouddms.v1",
    manifest={
        "ListMigrationJobsRequest",
        "ListMigrationJobsResponse",
        "GetMigrationJobRequest",
        "CreateMigrationJobRequest",
        "UpdateMigrationJobRequest",
        "DeleteMigrationJobRequest",
        "StartMigrationJobRequest",
        "StopMigrationJobRequest",
        "ResumeMigrationJobRequest",
        "PromoteMigrationJobRequest",
        "VerifyMigrationJobRequest",
        "RestartMigrationJobRequest",
        "GenerateSshScriptRequest",
        "VmCreationConfig",
        "VmSelectionConfig",
        "SshScript",
        "ListConnectionProfilesRequest",
        "ListConnectionProfilesResponse",
        "GetConnectionProfileRequest",
        "CreateConnectionProfileRequest",
        "UpdateConnectionProfileRequest",
        "DeleteConnectionProfileRequest",
        "OperationMetadata",
    },
)
class ListMigrationJobsRequest(proto.Message):
    r"""Retrieve a list of all migration jobs in a given project and
    location.
    Attributes:
        parent (str):
            Required. The parent, which owns this
            collection of migrationJobs.
        page_size (int):
            The maximum number of migration jobs to
            return. The service may return fewer than this
            value. If unspecified, at most 50 migration jobs
            will be returned. The maximum value is 1000;
            values above 1000 will be coerced to 1000.
        page_token (str):
            The nextPageToken value received in the
            previous call to migrationJobs.list, used in the
            subsequent request to retrieve the next page of
            results. On first call this should be left
            blank. When paginating, all other parameters
            provided to migrationJobs.list must match the
            call that provided the page token.
        filter (str):
            A filter expression that filters migration jobs listed in
            the response. The expression must specify the field name, a
            comparison operator, and the value that you want to use for
            filtering. The value must be a string, a number, or a
            boolean. The comparison operator must be either =, !=, >, or
            <. For example, list migration jobs created this year by
            specifying **createTime > 2020-01-01T00:00:00.000000000Z.**
            You can also filter nested
            fields. For example, you could specify
            **reverseSshConnectivity.vmIp = "1.2.3.4"** to select all
            migration jobs connecting through the specific SSH tunnel
            bastion.
        order_by (str):
            Sort the results based on the migration job
            name. Valid values are: "name", "name asc", and
            "name desc".
    """

    # Generated proto-plus bindings: field numbers are wire-format tags fixed
    # by the service proto and must never be renumbered.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    filter = proto.Field(proto.STRING, number=4,)
    order_by = proto.Field(proto.STRING, number=5,)
class ListMigrationJobsResponse(proto.Message):
    r"""Response message for 'ListMigrationJobs' request.
    Attributes:
        migration_jobs (Sequence[google.cloud.clouddms_v1.types.MigrationJob]):
            The list of migration jobs objects.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page. If this field is omitted, there are no subsequent
            pages.
        unreachable (Sequence[str]):
            Locations that could not be reached.
    """

    @property
    def raw_page(self):
        # Returns self; standard hook generated for paginated responses
        # (presumably consumed by google-api-core's page iterators — confirm).
        return self

    migration_jobs = proto.RepeatedField(
        proto.MESSAGE, number=1, message=clouddms_resources.MigrationJob,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
    unreachable = proto.RepeatedField(proto.STRING, number=3,)
class GetMigrationJobRequest(proto.Message):
    r"""Request message for 'GetMigrationJob' request.
    Attributes:
        name (str):
            Required. Name of the migration job resource
            to get.
    """

    # Field 1: fully-qualified migration job resource name.
    name = proto.Field(proto.STRING, number=1,)
class CreateMigrationJobRequest(proto.Message):
    r"""Request message to create a new Database Migration Service
    migration job in the specified project and region.
    Attributes:
        parent (str):
            Required. The parent, which owns this
            collection of migration jobs.
        migration_job_id (str):
            Required. The ID of the instance to create.
        migration_job (google.cloud.clouddms_v1.types.MigrationJob):
            Required. Represents a `migration
            job <https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.migrationJobs>`__
            object.
        request_id (str):
            A unique id used to identify the request. If the server
            receives two requests with the same id, then the second
            request will be ignored.
            It is recommended to always set this value to a UUID.
            The id must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (_), and hyphens (-). The maximum length is 40
            characters.
    """

    # Field numbers are fixed by the service proto; request_id lets the
    # server deduplicate retried create requests.
    parent = proto.Field(proto.STRING, number=1,)
    migration_job_id = proto.Field(proto.STRING, number=2,)
    migration_job = proto.Field(
        proto.MESSAGE, number=3, message=clouddms_resources.MigrationJob,
    )
    request_id = proto.Field(proto.STRING, number=4,)
class UpdateMigrationJobRequest(proto.Message):
    r"""Request message for 'UpdateMigrationJob' request.
    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Field mask is used to specify the
            fields to be overwritten in the migration job
            resource by the update.
        migration_job (google.cloud.clouddms_v1.types.MigrationJob):
            Required. The migration job parameters to
            update.
        request_id (str):
            A unique id used to identify the request. If the server
            receives two requests with the same id, then the second
            request will be ignored.
            It is recommended to always set this value to a UUID.
            The id must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (_), and hyphens (-). The maximum length is 40
            characters.
    """

    # update_mask selects which migration_job fields the server overwrites;
    # request_id lets the server deduplicate retried update requests.
    update_mask = proto.Field(
        proto.MESSAGE, number=1, message=field_mask_pb2.FieldMask,
    )
    migration_job = proto.Field(
        proto.MESSAGE, number=2, message=clouddms_resources.MigrationJob,
    )
    request_id = proto.Field(proto.STRING, number=3,)
class DeleteMigrationJobRequest(proto.Message):
    r"""Request message for 'DeleteMigrationJob' request.
    Attributes:
        name (str):
            Required. Name of the migration job resource
            to delete.
        request_id (str):
            A unique id used to identify the request. If the server
            receives two requests with the same id, then the second
            request will be ignored.
            It is recommended to always set this value to a UUID.
            The id must contain only letters (a-z, A-Z), numbers (0-9),
            underscores (_), and hyphens (-). The maximum length is 40
            characters.
        force (bool):
            The destination CloudSQL connection profile
            is always deleted with the migration job. In
            case of force delete, the destination CloudSQL
            replica database is also deleted.
    """

    # force=True additionally deletes the destination CloudSQL replica
    # database, not just the migration job and its connection profile.
    name = proto.Field(proto.STRING, number=1,)
    request_id = proto.Field(proto.STRING, number=2,)
    force = proto.Field(proto.BOOL, number=3,)
class StartMigrationJobRequest(proto.Message):
    r"""Request message for 'StartMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to start.
    """

    # Field 1: resource name of the migration job to start.
    name = proto.Field(proto.STRING, number=1,)
class StopMigrationJobRequest(proto.Message):
    r"""Request message for 'StopMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to stop.
    """

    # Field 1: resource name of the migration job to stop.
    name = proto.Field(proto.STRING, number=1,)
class ResumeMigrationJobRequest(proto.Message):
    r"""Request message for 'ResumeMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to resume.
    """

    # Field 1: resource name of the migration job to resume.
    name = proto.Field(proto.STRING, number=1,)
class PromoteMigrationJobRequest(proto.Message):
    r"""Request message for 'PromoteMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to
            promote.
    """

    # Field 1: resource name of the migration job to promote.
    name = proto.Field(proto.STRING, number=1,)
class VerifyMigrationJobRequest(proto.Message):
    r"""Request message for 'VerifyMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to verify.
    """

    # Field 1: resource name of the migration job to verify.
    name = proto.Field(proto.STRING, number=1,)
class RestartMigrationJobRequest(proto.Message):
    r"""Request message for 'RestartMigrationJob' request.
    Attributes:
        name (str):
            Name of the migration job resource to
            restart.
    """

    # Field 1: resource name of the migration job to restart.
    name = proto.Field(proto.STRING, number=1,)
class GenerateSshScriptRequest(proto.Message):
    r"""Request message for 'GenerateSshScript' request.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        migration_job (str):
            Name of the migration job resource to
            generate the SSH script.
        vm (str):
            Required. Bastion VM Instance name to use or
            to create.
        vm_creation_config (google.cloud.clouddms_v1.types.VmCreationConfig):
            The VM creation configuration
            This field is a member of `oneof`_ ``vm_config``.
        vm_selection_config (google.cloud.clouddms_v1.types.VmSelectionConfig):
            The VM selection configuration
            This field is a member of `oneof`_ ``vm_config``.
        vm_port (int):
            The port that will be open on the bastion
            host
    """

    migration_job = proto.Field(proto.STRING, number=1,)
    vm = proto.Field(proto.STRING, number=2,)
    # vm_creation_config (tag 100) and vm_selection_config (tag 101) form the
    # mutually-exclusive "vm_config" oneof: setting one clears the other.
    vm_creation_config = proto.Field(
        proto.MESSAGE, number=100, oneof="vm_config", message="VmCreationConfig",
    )
    vm_selection_config = proto.Field(
        proto.MESSAGE, number=101, oneof="vm_config", message="VmSelectionConfig",
    )
    vm_port = proto.Field(proto.INT32, number=3,)
class VmCreationConfig(proto.Message):
    r"""VM creation configuration message.

    Attributes:
        vm_machine_type (str):
            Required. VM instance machine type to create.
        vm_zone (str):
            The Google Cloud Platform zone to create the VM in.
        subnet (str):
            The subnet name the vm needs to be created in.
    """

    vm_machine_type = proto.Field(proto.STRING, number=1)
    vm_zone = proto.Field(proto.STRING, number=2)
    subnet = proto.Field(proto.STRING, number=3)
class VmSelectionConfig(proto.Message):
    r"""VM selection configuration message.

    Attributes:
        vm_zone (str):
            Required. The Google Cloud Platform zone the VM is
            located in.
    """

    vm_zone = proto.Field(proto.STRING, number=1)
class SshScript(proto.Message):
    r"""Response message for the 'GenerateSshScript' RPC.

    Attributes:
        script (str):
            The ssh configuration script.
    """

    script = proto.Field(proto.STRING, number=1)
class ListConnectionProfilesRequest(proto.Message):
r"""Request message for 'ListConnectionProfiles' request.
Attributes:
parent (str):
Required. The parent, which owns this
collection of connection profiles.
page_size (int):
The maximum number of connection profiles to
return. The service may return fewer than this
value. If unspecified, at most 50 connection
profiles will be returned. The maximum value is
1000; values above 1000 will be coerced to 1000.
page_token (str):
A page token, received from a previous
``ListConnectionProfiles`` call. Provide this to retrieve
the subsequent page.
| |
<filename>tests/api/endpoints/test_dir_view.py
# -*- coding: utf-8 -*-
import os
import json
import posixpath
from seaserv import seafile_api
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
from seahub.utils import check_filename_with_rename
from tests.common.utils import randstring
from seahub.settings import THUMBNAIL_ROOT
try:
from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
LOCAL_PRO_DEV_ENV = False
class DirViewTest(BaseTestCase):
def create_new_repo(self):
new_repo_id = seafile_api.create_repo(name='test-repo-2', desc='',
username=self.user.username, passwd=None)
return new_repo_id
def get_lib_folder_name(self, repo_id):
url = reverse('list_lib_dir', args=[repo_id])
resp = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
json_resp = json.loads(resp.content)
if len(json_resp['dirent_list']) > 0:
for dirent in json_resp['dirent_list']:
if dirent.has_key('is_dir') and dirent['is_dir']:
return dirent['obj_name']
else:
continue
return None
    def setUp(self):
        # Cache ids/paths/names from the BaseTestCase fixtures and build the
        # api-v2.1 dir endpoint URL used by every test in this class.
        self.repo_id = self.repo.id
        self.file_path = self.file
        # `self.file` is a path string (os.path.basename is applied below);
        # strip a possible trailing slash before taking the name.
        self.file_name = os.path.basename(self.file_path.rstrip('/'))
        self.folder_path = self.folder
        self.folder_name = os.path.basename(self.folder_path)
        self.user_name = self.user.username
        self.admin_name = self.admin.username
        self.url = reverse('api-v2.1-dir-view', args=[self.repo_id])
    def tearDown(self):
        # Drop the fixture repo so tests stay isolated from each other.
        self.remove_repo()
    # tests for HTTP GET requests
def test_can_get(self):
self.login_as(self.user)
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 2
assert json_resp['dirent_list'][0]['type'] == 'dir'
assert json_resp['dirent_list'][0]['name'] == self.folder_name
assert json_resp['dirent_list'][1]['type'] == 'file'
assert json_resp['dirent_list'][1]['name'] == self.file_name
def test_can_get_with_dir_type_parameter(self):
self.login_as(self.user)
resp = self.client.get(self.url + '?t=d')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'dir'
assert json_resp['dirent_list'][0]['name'] == self.folder_name
def test_can_get_with_file_type_parameter(self):
self.login_as(self.user)
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
def test_can_get_with_recursive_parameter(self):
# create a sub folder
new_dir_name = randstring(6)
seafile_api.post_dir(self.repo_id, self.folder_path,
new_dir_name, self.user_name)
self.login_as(self.user)
resp = self.client.get(self.url + '?recursive=1')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 3
assert json_resp['dirent_list'][0]['type'] == 'dir'
assert json_resp['dirent_list'][0]['name'] == self.folder_name
assert json_resp['dirent_list'][0]['parent_dir'] == '/'
assert json_resp['dirent_list'][1]['type'] == 'dir'
assert json_resp['dirent_list'][1]['name'] == new_dir_name
assert json_resp['dirent_list'][1]['parent_dir'] == self.folder_path
assert json_resp['dirent_list'][2]['type'] == 'file'
assert json_resp['dirent_list'][2]['name'] == self.file_name
def test_can_get_with_recursive_and_dir_type_parameter(self):
# create a sub folder
new_dir_name = randstring(6)
seafile_api.post_dir(self.repo_id, self.folder_path,
new_dir_name, self.user_name)
self.login_as(self.user)
resp = self.client.get(self.url + '?recursive=1&t=d')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 2
assert json_resp['dirent_list'][0]['type'] == 'dir'
assert json_resp['dirent_list'][0]['name'] == self.folder_name
assert json_resp['dirent_list'][0]['parent_dir'] == '/'
assert json_resp['dirent_list'][1]['type'] == 'dir'
assert json_resp['dirent_list'][1]['name'] == new_dir_name
assert json_resp['dirent_list'][1]['parent_dir'] == self.folder_path
def test_can_get_with_recursive_and_file_type_parameter(self):
# create a sub folder
new_dir_name = randstring(6)
seafile_api.post_dir(self.repo_id, self.folder_path,
new_dir_name, self.user_name)
self.login_as(self.user)
resp = self.client.get(self.url + '?recursive=1&t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
def test_can_get_file_with_lock_info(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.user)
# no lock owner info returned
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert json_resp['dirent_list'][0]['lock_owner'] == ''
# lock file
seafile_api.lock_file(self.repo_id, self.file_path, self.admin_name, 1)
# return lock owner info
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert json_resp['dirent_list'][0]['lock_owner'] == self.admin_name
def test_can_get_file_with_star_info(self):
self.login_as(self.user)
# file is not starred
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert json_resp['dirent_list'][0]['starred'] == False
# star file
resp = self.client.post(reverse('starredfiles'), {'repo_id': self.repo.id, 'p': self.file_path})
self.assertEqual(201, resp.status_code)
# file is starred
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert json_resp['dirent_list'][0]['starred'] == True
def test_can_get_file_with_tag_info(self):
self.login_as(self.user)
# file has no tags
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert not json_resp['dirent_list'][0].has_key('file_tags')
# add file tag
tag_name = randstring(6)
tag_color = randstring(6)
repo_tag_data = {'name': tag_name, 'color': tag_color}
resp = self.client.post(reverse('api-v2.1-repo-tags', args=[self.repo_id]), repo_tag_data)
json_resp = json.loads(resp.content)
repo_tag_id = json_resp['repo_tag']['repo_tag_id']
file_tag_data = {'file_path': self.file_path, 'repo_tag_id': repo_tag_id}
resp = self.client.post(reverse('api-v2.1-file-tags', args=[self.repo_id]), file_tag_data)
# file has tag
resp = self.client.get(self.url + '?t=f')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == self.file_name
assert json_resp['dirent_list'][0]['file_tags'][0]['repo_tag_id'] == repo_tag_id
assert json_resp['dirent_list'][0]['file_tags'][0]['tag_name'] == tag_name
assert json_resp['dirent_list'][0]['file_tags'][0]['tag_color'] == tag_color
def test_can_get_file_with_thumbnail_info(self):
self.login_as(self.user)
# create a image file
image_file_name = randstring(6) + '.jpg'
seafile_api.post_empty_file(self.repo_id, self.folder_path,
image_file_name, self.user_name)
# file has no thumbnail
resp = self.client.get(self.url + '?t=f&with_thumbnail=true&p=%s' % self.folder_path)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == image_file_name
assert not json_resp['dirent_list'][0].has_key('encoded_thumbnail_src')
file_id = json_resp['dirent_list'][0]['id']
# prepare thumbnail
size = 48
thumbnail_dir = os.path.join(THUMBNAIL_ROOT, str(size))
if not os.path.exists(thumbnail_dir):
os.makedirs(thumbnail_dir)
thumbnail_file = os.path.join(thumbnail_dir, file_id)
with open(thumbnail_file, 'w'):
pass
assert os.path.exists(thumbnail_file)
# file has thumbnail
resp = self.client.get(self.url + '?t=f&with_thumbnail=true&p=%s' % self.folder_path)
self.assertEqual(200, resp.status_code)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['dirent_list']) == 1
assert json_resp['dirent_list'][0]['type'] == 'file'
assert json_resp['dirent_list'][0]['name'] == image_file_name
assert image_file_name in json_resp['dirent_list'][0]['encoded_thumbnail_src']
def test_get_dir_with_invalid_perm(self):
# login as admin, then get dir info in user's repo
self.login_as(self.admin)
resp = self.client.get(self.url)
self.assertEqual(403, resp.status_code)
    # tests for HTTP POST requests
def test_post_operation_invalid(self):
self.login_as(self.user)
data = {'operation': 'invalid',}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
self.assertEqual(400, resp.status_code)
def test_can_create_folder(self):
self.login_as(self.user)
# delete old folder
resp = self.client.delete(self.url + '?p=' + self.folder_path,
{}, 'application/x-www-form-urlencoded')
assert None == self.get_lib_folder_name(self.repo_id)
# check folder has been deleted
assert None == self.get_lib_folder_name(self.repo_id)
new_name = randstring(6)
new_folder_path = '/' + new_name
# create file
data = {'operation': 'mkdir',}
resp = self.client.post(self.url + '?p=' + new_folder_path, data)
self.assertEqual(200, resp.status_code)
# check new folder has been created
assert new_name == self.get_lib_folder_name(self.repo_id)
def test_can_create_same_name_folder(self):
self.login_as(self.user)
folder_name = os.path.basename(self.folder_path.rstrip('/'))
new_name = check_filename_with_rename(self.repo_id, '/', folder_name)
# create file
data = {'operation': 'mkdir',}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
json_resp = json.loads(resp.content)
self.assertEqual(200, resp.status_code)
# check new folder has been created
assert new_name == json_resp['obj_name']
def test_create_folder_with_invalid_repo_perm(self):
# login as admin, then create dir in user's repo
self.login_as(self.admin)
new_name = randstring(6)
new_folder_path = '/' + new_name
# create file
data = {'operation': 'mkdir',}
resp = self.client.post(self.url + '?p=' + new_folder_path, data)
self.assertEqual(403, resp.status_code)
def test_create_folder_with_invalid_folder_perm(self):
if not LOCAL_PRO_DEV_ENV:
return
# share user's repo to admin with 'rw' permission
seafile_api.share_repo(self.repo_id, self.user_name,
self.admin_name, 'rw')
# set sub-folder permisson as 'r' for admin
seafile_api.add_folder_user_perm(self.repo_id,
self.folder_path, 'r', self.admin_name)
# admin can visit sub-folder with 'r' permission
assert seafile_api.check_permission_by_path(self.repo_id,
self.folder_path, self.admin_name) == 'r'
# login as admin, then create dir in a 'r' permission folder
self.login_as(self.admin)
new_name = randstring(6)
new_folder_path = posixpath.join(self.folder_path, new_name)
data = {'operation': 'mkdir',}
resp = self.client.post(self.url + '?p=' + new_folder_path, data)
self.assertEqual(403, resp.status_code)
def test_can_rename_folder(self):
self.login_as(self.user)
new_name = randstring(6)
# check old folder exist
assert self.folder_name == self.get_lib_folder_name(self.repo_id)
data = {'operation': 'rename', 'newname': new_name}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
self.assertEqual(200, resp.status_code)
# check old file has been renamed to new_name
assert new_name == self.get_lib_folder_name(self.repo_id)
def test_rename_folder_with_invalid_name(self):
self.login_as(self.user)
# check old folder exist
assert self.folder_name == self.get_lib_folder_name(self.repo_id)
data = {'operation': 'rename', 'newname': '123/456'}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
self.assertEqual(400, resp.status_code)
def test_can_rename_folder_with_same_name(self):
self.login_as(self.user)
# check old folder exist
assert self.folder_name == self.get_lib_folder_name(self.repo_id)
# create a new folder
new_name = randstring(6)
data = {'operation': 'mkdir',}
resp = self.client.post(self.url + '?p=/' + new_name, data)
self.assertEqual(200, resp.status_code)
# rename new folder with the same name of old folder
old_folder_name = self.folder_name
checked_name = check_filename_with_rename(self.repo_id,
'/', old_folder_name)
data = {'operation': 'rename', 'newname': checked_name}
resp = self.client.post(self.url + '?p=/' + new_name, data)
self.assertEqual(200, resp.status_code)
# check old file has been renamed to new_name
json_resp = json.loads(resp.content)
assert checked_name == json_resp['obj_name']
def test_rename_folder_with_invalid_repo_perm(self):
# login as admin, then rename dir in user's repo
self.login_as(self.admin)
new_name = randstring(6)
data = {'operation': 'rename', 'newname': new_name}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
self.assertEqual(403, resp.status_code)
def test_rename_folder_with_invalid_folder_perm(self):
if not LOCAL_PRO_DEV_ENV:
return
# share user's repo to admin with 'rw' permission
seafile_api.share_repo(self.repo_id, self.user_name,
self.admin_name, 'rw')
# set sub-folder permisson as 'r' for admin
seafile_api.add_folder_user_perm(self.repo_id,
self.folder_path, 'r', self.admin_name)
# admin can visit sub-folder with 'r' permission
assert seafile_api.check_permission_by_path(self.repo_id,
self.folder_path, self.admin_name) == 'r'
# login as admin, then rename a 'r' permission folder
self.login_as(self.admin)
new_name = randstring(6)
data = {'operation': 'rename', 'newname': new_name}
resp = self.client.post(self.url + '?p=' + self.folder_path, data)
self.assertEqual(403, resp.status_code)
def test_can_revert_folder(self):
self.login_as(self.user)
# first rename dir
new_name = randstring(6)
seafile_api.rename_file(self.repo_id, '/', self.folder_name,
new_name, self.user_name)
new_dir_path = '/' + new_name
# get commit list
commits = seafile_api.get_commit_list(self.repo_id, -1, -1)
# then revert | |
arquivo
self.vBCSTRet.xml = arquivo
self.vICMSSTRet.xml = arquivo
if self.regime_tributario == 1:
self.CSOSN.xml = arquivo
self.pCredSN.xml = arquivo
self.vCredICMSSN.xml = arquivo
else:
self.UFST.xml = arquivo
self.pBCOp.xml = arquivo
self.motDesICMS.xml = arquivo
self.vBCSTDest.xml = arquivo
self.vICMSSTDest.xml = arquivo
xml = property(get_xml, set_xml)
class Imposto(nfe_110.Imposto):
    """Tax group (<imposto>) of an NF-e item, extended for layout 2.00
    with the optional vTotTrib (approximate total tax) tag."""
    def __init__(self):
        super(Imposto, self).__init__()
        self.vTotTrib = TagDecimal(nome=u'vTotTrib' , codigo=u'M02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/imposto', obrigatorio=False)
        self.ICMS = ICMS()
        self.ISSQN = ISSQN()
    def get_xml(self):
        # Serialize the group; the tag order below is fixed by the NF-e schema.
        xml = XMLNFe.get_xml(self)
        xml += u'<imposto>'
        xml += self.vTotTrib.xml
        # Emit ICMS, IPI and II only when the item is not a service
        # (cSitTrib set means the item is taxed as an ISSQN service).
        if not self.ISSQN.cSitTrib.valor:
            xml += self.ICMS.xml
            xml += self.IPI.xml
            xml += self.II.xml
        if self.ISSQN.cSitTrib.valor:
            xml += self.ISSQN.xml
        xml += self.PIS.xml
        xml += self.PISST.xml
        xml += self.COFINS.xml
        xml += self.COFINSST.xml
        xml += u'</imposto>'
        return xml
    def set_xml(self, arquivo):
        # Populate every child tag from the XML document; tags absent from
        # the document simply stay empty.
        if self._le_xml(arquivo):
            self.vTotTrib.xml = arquivo
            self.ICMS.xml = arquivo
            self.IPI.xml = arquivo
            self.II.xml = arquivo
            self.PIS.xml = arquivo
            self.PISST.xml = arquivo
            self.COFINS.xml = arquivo
            self.COFINSST.xml = arquivo
            self.ISSQN.xml = arquivo
    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class CIDE(nfe_110.CIDE):
    """CIDE group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(CIDE, self).__init__()
class Comb(nfe_110.Comb):
    """Fuel detail group (<comb>); same fields as layout 1.10, but the
    group is emitted only when a product code (cProdANP) is present."""
    def get_xml(self):
        # Optional group: emit nothing when no ANP product code is set.
        if not self.cProdANP.valor:
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<comb>'
        xml += self.cProdANP.xml
        xml += self.CODIF.xml
        xml += self.qTemp.xml
        xml += self.UFCons.xml
        xml += self.CIDE.xml
        xml += u'</comb>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cProdANP.xml = arquivo
            self.CODIF.xml = arquivo
            self.qTemp.xml = arquivo
            self.UFCons.xml = arquivo
            self.CIDE.xml = arquivo
    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class Arma(nfe_110.Arma):
    """Arma group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Arma, self).__init__()
class Med(nfe_110.Med):
    """Med group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Med, self).__init__()
class VeicProd(nfe_110.VeicProd):
    """Vehicle detail group (<veicProd>), extended for layout 2.00 with
    cilin/tpComb/CMT turned into character tags plus the new
    cCorDENATRAN, lota and tpRest tags."""

    # Tag order is fixed by the NF-e schema; both serialization and
    # parsing iterate this single table.
    _ORDEM = (
        'tpOp', 'chassi', 'cCor', 'xCor', 'pot', 'cilin', 'pesoL',
        'pesoB', 'nSerie', 'tpComb', 'nMotor', 'CMT', 'dist', 'anoMod',
        'anoFab', 'tpPint', 'tpVeic', 'espVeic', 'VIN', 'condVeic',
        'cMod', 'cCorDENATRAN', 'lota', 'tpRest',
    )

    def __init__(self):
        super(VeicProd, self).__init__()
        self.cilin = TagCaracter(nome=u'cilin', codigo=u'J07', tamanho=[1, 4], raiz=u'//det/prod/veicProd')
        self.tpComb = TagCaracter(nome=u'tpComb', codigo=u'J11', tamanho=[2, 2], raiz=u'//det/prod/veicProd')
        self.CMT = TagCaracter(nome=u'CMT', codigo=u'J13', tamanho=[1, 9], raiz=u'//det/prod/veicProd')
        self.cCorDENATRAN = TagCaracter(nome=u'cCorDENATRAN', codigo=u'J24', tamanho=[2, 2], raiz=u'//det/prod/veicProd')
        self.lota = TagInteiro(nome=u'lota', codigo=u'J25', tamanho=[1, 3], raiz=u'//det/prod/veicProd')
        self.tpRest = TagInteiro(nome=u'tpRest', codigo=u'J26', tamanho=[1, 3], raiz=u'//det/prod/veicProd')

    def get_xml(self):
        # Optional group: emitted only when a chassis number is present.
        if not self.chassi.valor:
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<veicProd>'
        for nome in self._ORDEM:
            xml += getattr(self, nome).xml
        xml += u'</veicProd>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for nome in self._ORDEM:
                getattr(self, nome).xml = arquivo

    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class Adi(nfe_110.Adi):
    """Adi group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Adi, self).__init__()
class DI(nfe_110.DI):
    """DI group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(DI, self).__init__()
class Prod(nfe_110.Prod):
    """Product group (<prod>) of an NF-e item, extended for layout 2.00:
    4-decimal quantities, 10-decimal unit values, indTot default 1, and
    the new xPed/nItemPed/nFCI tags, plus this module's VeicProd/Comb."""
    def __init__(self):
        super(Prod, self).__init__()
        self.NCM = TagCaracter(nome=u'NCM' , codigo=u'I05' , tamanho=[2, 8] , raiz=u'//det/prod')
        self.qCom = TagDecimal(nome=u'qCom' , codigo=u'I10' , tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vUnCom = TagDecimal(nome=u'vUnCom' , codigo=u'I10a', tamanho=[1, 21, 1], decimais=[0, 10, 4], raiz=u'//det/prod')
        self.qTrib = TagDecimal(nome=u'qTrib' , codigo=u'I14' , tamanho=[1, 15, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vUnTrib = TagDecimal(nome=u'vUnTrib' , codigo=u'I14a', tamanho=[1, 21, 1], decimais=[0, 10, 4], raiz=u'//det/prod')
        self.vOutro = TagDecimal(nome=u'vOutro' , codigo=u'I17a', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.indTot = TagInteiro(nome=u'indTot' , codigo=u'I17b', tamanho=[1, 1, 1], raiz=u'//det/prod', valor=1)
        self.xPed = TagCaracter(nome=u'xPed' , codigo=u'I30' , tamanho=[1, 15], raiz=u'//det/prod', obrigatorio=False)
        self.nItemPed = TagCaracter(nome=u'nItemPed', codigo=u'I31' , tamanho=[1, 6], raiz=u'//det/prod', obrigatorio=False)
        self.nFCI = TagCaracter(nome=u'nFCI', codigo=u'I70', tamanho=[1,36], raiz=u'//det/prod', obrigatorio=False)
        self.veicProd = VeicProd()
        self.comb = Comb()
    def get_xml(self):
        # Serialize the group; the tag order below is fixed by the schema.
        xml = XMLNFe.get_xml(self)
        xml += u'<prod>'
        xml += self.cProd.xml
        xml += self.cEAN.xml
        xml += self.xProd.xml
        xml += self.NCM.xml
        xml += self.EXTIPI.xml
        # genero was dropped from the 2.00 layout, hence not emitted.
        #xml += self.genero.xml
        xml += self.CFOP.xml
        xml += self.uCom.xml
        xml += self.qCom.xml
        xml += self.vUnCom.xml
        xml += self.vProd.xml
        xml += self.cEANTrib.xml
        xml += self.uTrib.xml
        xml += self.qTrib.xml
        xml += self.vUnTrib.xml
        xml += self.vFrete.xml
        xml += self.vSeg.xml
        xml += self.vDesc.xml
        xml += self.vOutro.xml
        xml += self.indTot.xml
        # Repeating groups (import declarations).
        for d in self.DI:
            xml += d.xml
        xml += self.xPed.xml
        xml += self.nItemPed.xml
        xml += self.nFCI.xml
        xml += self.veicProd.xml
        for m in self.med:
            xml += m.xml
        for a in self.arma:
            xml += a.xml
        xml += self.comb.xml
        xml += u'</prod>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cProd.xml = arquivo
            self.cEAN.xml = arquivo
            self.xProd.xml = arquivo
            self.NCM.xml = arquivo
            self.EXTIPI.xml = arquivo
            # NOTE(review): genero is still parsed here although get_xml no
            # longer emits it — presumably kept for reading 1.10 documents;
            # confirm before removing.
            self.genero.xml = arquivo
            self.CFOP.xml = arquivo
            self.uCom.xml = arquivo
            self.qCom.xml = arquivo
            self.vUnCom.xml = arquivo
            self.vProd.xml = arquivo
            self.cEANTrib.xml = arquivo
            self.uTrib.xml = arquivo
            self.qTrib.xml = arquivo
            self.vUnTrib.xml = arquivo
            self.vFrete.xml = arquivo
            self.vSeg.xml = arquivo
            self.vDesc.xml = arquivo
            self.vOutro.xml = arquivo
            #
            # Technique for reading repeated tags: the tag classes (and
            # their children) must be "re-rooted" (raiz property) so they
            # can be read correctly.
            #
            self.DI = self.le_grupo('//det/prod/DI', DI)
            self.nFCI.xml = arquivo
            self.veicProd.xml = arquivo
            #
            # Same repeated-tag technique as above for med and arma.
            #
            self.med = self.le_grupo('//det/prod/med', Med)
            self.arma = self.le_grupo('//det/prod/arma', Arma)
            self.comb.xml = arquivo
    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class Det(nfe_110.Det):
    """Item (<det>) of the NF-e, wiring in this module's Prod and Imposto."""
    def __init__(self):
        super(Det, self).__init__()
        self.prod = Prod()
        self.imposto = Imposto()
    def cst_formatado(self):
        """Return the item's CST formatted as origin (1 digit) + CST (2 digits)."""
        #TODO: add the Simples Nacional (CSOSN) formatting here
        # if self.imposto.regime_tributario != 1:
        #     super(Det, self).cst_formatado()
        #     formatado = unicode(self.imposto.ICMS.orig.valor).zfill(1)
        #     formatado += unicode(self.imposto.ICMS.CSOSN.valor).zfill(3)
        #####################################
        # NOTE(review): the parent call's return value is discarded; it looks
        # redundant — confirm it has no needed side effect before removing.
        super(Det, self).cst_formatado()
        formatado = unicode(self.imposto.ICMS.orig.valor).zfill(1)
        formatado += unicode(self.imposto.ICMS.CST.valor).zfill(2)
        return formatado
class Compra(nfe_110.Compra):
    """Compra group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Compra, self).__init__()
class Exporta(nfe_110.Exporta):
    """Exporta group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Exporta, self).__init__()
class ProcRef(nfe_110.ProcRef):
    """ProcRef group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(ProcRef, self).__init__()
class ObsFisco(nfe_110.ObsFisco):
    """ObsFisco group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(ObsFisco, self).__init__()
class ObsCont(nfe_110.ObsCont):
    """ObsCont group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(ObsCont, self).__init__()
class InfAdic(nfe_110.InfAdic):
    """InfAdic group; layout 2.00 widens infAdFisco to 2000 characters."""
    def __init__(self):
        super(InfAdic, self).__init__()
        self.infAdFisco = TagCaracter(nome=u'infAdFisco', codigo=u'Z02', tamanho=[1, 2000], raiz=u'//NFe/infNFe/infAdic', obrigatorio=False)
class Dup(nfe_110.Dup):
    """Dup group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Dup, self).__init__()
class Fat(nfe_110.Fat):
    """Fat group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Fat, self).__init__()
class Cobr(nfe_110.Cobr):
    """Cobr group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Cobr, self).__init__()
class Lacres(nfe_110.Lacres):
    """Lacres group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Lacres, self).__init__()
class Vol(nfe_110.Vol):
    """Vol group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self, xml=None):
        # NOTE(review): the `xml` parameter is accepted but never used and
        # is not forwarded to the parent — confirm whether callers rely on
        # passing it before cleaning this up.
        super(Vol, self).__init__()
class Reboque(nfe_110.Reboque):
    """Reboque group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Reboque, self).__init__()
class VeicTransp(nfe_110.VeicTransp):
    """VeicTransp group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(VeicTransp, self).__init__()
class RetTransp(nfe_110.RetTransp):
    """RetTransp group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(RetTransp, self).__init__()
class Transporta(nfe_110.Transporta):
    """Transporta group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Transporta, self).__init__()
class Transp(nfe_110.Transp):
    """Transport group (<transp>), extended for layout 2.00 with the
    mutually exclusive vagao (rail) and balsa (ferry) alternatives to
    the road vehicle group."""
    def __init__(self):
        super(Transp, self).__init__()
        self.vagao = TagCaracter(nome=u'vagao', codigo=u'X25a', tamanho=[1, 20], raiz=u'//NFe/infNFe/transp', obrigatorio=False)
        self.balsa = TagCaracter(nome=u'balsa', codigo=u'X25b', tamanho=[1, 20], raiz=u'//NFe/infNFe/transp', obrigatorio=False)
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += u'<transp>'
        xml += self.modFrete.xml
        xml += self.transporta.xml
        xml += self.retTransp.xml
        # Exactly one of balsa / vagao / veicTransp is emitted; balsa wins
        # over vagao when both are set.
        if self.balsa.valor:
            xml += self.balsa.xml
        elif self.vagao.valor:
            xml += self.vagao.xml
        else:
            xml += self.veicTransp.xml
        for r in self.reboque:
            xml += r.xml
        for v in self.vol:
            xml += v.xml
        xml += u'</transp>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.modFrete.xml = arquivo
            self.transporta.xml = arquivo
            self.retTransp.xml = arquivo
            self.veicTransp.xml = arquivo
            #
            # Technique for reading repeated tags: the tag classes (and
            # their children) must be "re-rooted" (raiz property) so they
            # can be read correctly.
            #
            self.reboque = self.le_grupo('//NFe/infNFe/transp/reboque', nfe_110.Reboque)
            self.vagao.xml = arquivo
            self.balsa.xml = arquivo
            self.vol = self.le_grupo('//NFe/infNFe/transp/vol', nfe_110.Vol)
    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class RetTrib(nfe_110.RetTrib):
    """RetTrib group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(RetTrib, self).__init__()
class ISSQNTot(nfe_110.ISSQNTot):
    """ISSQNTot group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(ISSQNTot, self).__init__()
class ICMSTot(nfe_110.ICMSTot):
    """ICMSTot group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(ICMSTot, self).__init__()
class Total(nfe_110.Total):
    """Total group; inherited unchanged from the layout 1.10 implementation."""
    def __init__(self):
        super(Total, self).__init__()
class Entrega(nfe_110.Entrega):
    """Delivery-location group (<entrega>), extended for layout 2.00 so the
    delivery party can be identified by either CNPJ (company) or CPF
    (individual); exactly one of them is emitted."""
    def __init__(self):
        super(Entrega, self).__init__()
        # Bug fix: these tags belong under <entrega>; the original roots
        # pointed at //NFe/infNFe/retirada (copy/paste from Retirada), so
        # parsing a document would read the pickup party's CNPJ/CPF into
        # the delivery group.
        self.CNPJ = TagCaracter(nome=u'CNPJ' , codigo=u'G02' , tamanho=[ 0, 14] , raiz=u'//NFe/infNFe/entrega')
        self.CPF  = TagCaracter(nome=u'CPF'  , codigo=u'G02a', tamanho=[11, 11] , raiz=u'//NFe/infNFe/entrega')
    def get_xml(self):
        # Optional group: emit nothing when no party is identified.
        if not (self.CNPJ.valor or self.CPF.valor):
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<entrega>'
        # CPF takes precedence when both are filled in.
        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.UF.xml
        xml += u'</entrega>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.UF.xml = arquivo
    # Expose serialization/parsing as a read/write property.
    xml = property(get_xml, set_xml)
class Retirada(nfe_110.Retirada):
def __init__(self):
super(Retirada, self).__init__()
self.CNPJ = TagCaracter(nome=u'CNPJ' , codigo=u'F02' , tamanho=[ 0, 14] , raiz=u'//NFe/infNFe/retirada')
self.CPF = TagCaracter(nome=u'CPF' , codigo=u'F02a', tamanho=[11, 11] , raiz=u'//NFe/infNFe/retirada')
def get_xml(self):
if not (self.CNPJ.valor or self.CPF.valor):
return u''
xml = XMLNFe.get_xml(self)
xml += u'<retirada>'
if self.CPF.valor:
xml += | |
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (PointGenerator, multi_apply, multiclass_nms_kp,
point_target_kp)
from mmdet.ops import DeformConv
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
class MultiColumnDeformConvBlock(nn.Module):
    """Three parallel deformable convolutions (3x3, 5x5, 7x7) applied to the
    same feature map, with outputs concatenated along the channel dim.

    Args:
        in_channels (int): channels of the input feature map.
        feat_channels (int): output channels of each deform-conv column.
        gradient_mul (float): fraction of the gradient allowed to flow back
            through the predicted offsets (the rest is detached).
    """

    def __init__(self,
                 in_channels=256,
                 feat_channels=256,
                 gradient_mul=0.1):
        super().__init__()
        self.gradient_mul = gradient_mul
        # 2 (y, x) coords per sampling point: 9 + 25 + 49 points in total.
        self.deform_offset_dim = 2 * (9 + 25 + 49)

        # Initialize the dcn base offsets (integer sampling grid of each
        # kernel); these are subtracted from the predicted offsets.
        self.dcn_kernel_3, self.dcn_pad_3 = 3, 1
        self.dcn_kernel_5, self.dcn_pad_5 = 5, 2
        self.dcn_kernel_7, self.dcn_pad_7 = 7, 3
        self.dcn_base_offset_3 = self._make_base_offset(self.dcn_kernel_3)
        self.dcn_base_offset_5 = self._make_base_offset(self.dcn_kernel_5)
        self.dcn_base_offset_7 = self._make_base_offset(self.dcn_kernel_7)

        # One deform-conv column per kernel size (stride 1, 'same' padding).
        self.dfmconv_3 = DeformConv(in_channels, feat_channels,
                                    self.dcn_kernel_3, 1, self.dcn_pad_3)
        self.dfmconv_5 = DeformConv(in_channels, feat_channels,
                                    self.dcn_kernel_5, 1, self.dcn_pad_5)
        self.dfmconv_7 = DeformConv(in_channels, feat_channels,
                                    self.dcn_kernel_7, 1, self.dcn_pad_7)

        # Initialize weights.
        normal_init(self.dfmconv_3, std=0.01)
        normal_init(self.dfmconv_5, std=0.01)
        normal_init(self.dfmconv_7, std=0.01)

    @staticmethod
    def _make_base_offset(kernel):
        """Return the (1, 2*k*k, 1, 1) tensor of (y, x) grid offsets of a
        k x k kernel centred at 0 (e.g. -1..1 for k=3)."""
        pad = (kernel - 1) // 2
        base = np.arange(-pad, pad + 1).astype(np.float64)
        base_y = np.repeat(base, kernel)
        base_x = np.tile(base, kernel)
        offset = np.stack([base_y, base_x], axis=1).reshape((-1))
        return torch.tensor(offset).view(1, -1, 1, 1)

    def _column(self, conv, base_offset, feat, deform_offset):
        """Run one deform-conv column with partially-detached offsets."""
        # Scale the offset gradient: only `gradient_mul` of it flows back.
        offset = (self.gradient_mul * deform_offset
                  + (1 - self.gradient_mul) * deform_offset.detach())
        return conv(feat, offset - base_offset.type_as(feat))

    def forward(self, feat, deform_offset):
        assert deform_offset.size(1) == self.deform_offset_dim
        # Packed offset layout: [0:18] -> 3x3, [18:68] -> 5x5, [68:166] -> 7x7.
        off_3 = deform_offset[:, :2 * 9, :, :]
        off_5 = deform_offset[:, 2 * 9:2 * (9 + 25), :, :]
        off_7 = deform_offset[:, 2 * (9 + 25):2 * (9 + 25 + 49), :, :]
        out_3 = self._column(self.dfmconv_3, self.dcn_base_offset_3, feat, off_3)
        out_5 = self._column(self.dfmconv_5, self.dcn_base_offset_5, feat, off_5)
        out_7 = self._column(self.dfmconv_7, self.dcn_base_offset_7, feat, off_7)
        return torch.cat([out_3, out_5, out_7], dim=1)
class KpDetModule(nn.Module):
    """One cascade stage of the keypoint-detection head.

    Predicts a classification score map, a bbox offset map and a keypoint
    offset map.  When ``deform_conv`` is true the stage refines the previous
    stage's keypoint offsets through multi-column deformable convolutions;
    otherwise it uses plain 3x3 convolutions.

    Args:
        deform_conv (bool): use deformable refinement (later cascade stages).
        cls_out_channels (int): classification output channels.
        in_channels (int): input feature channels.
        feat_channels (int): intermediate feature channels.
        num_reppts (int): kept for interface compatibility; the deformable
            columns always use the fixed 9 + 25 + 49 point layout.
        num_keypts (int): number of keypoints (offset dim is 2 * num_keypts).
        gradient_mul (float): gradient multiplier for offsets fed into the
            deformable convolutions.
        transform_method (str): points-to-bbox conversion, one of 'minmax',
            'moment', 'minmax_param', 'moment_param'.
        moment_mul (float): gradient multiplier for moment/param transfers.
    """
    def __init__(self,
                 deform_conv,
                 cls_out_channels,
                 in_channels=256,
                 feat_channels=256,
                 num_reppts=9,
                 num_keypts=17,
                 gradient_mul=0.1,
                 transform_method='minmax',
                 moment_mul=0.01):
        super().__init__()
        self.deform_conv = deform_conv
        self.gradient_mul = gradient_mul
        self.transform_method = transform_method
        self.moment_mul = moment_mul
        keypts_out_dim = 2 * num_keypts
        deform_offset_dim = 2 * (9 + 25 + 49)
        self.relu = nn.ReLU(inplace=False)
        # initiate conv layers
        if deform_conv:
            # Maps previous-stage keypoint offsets to deformable-conv offsets.
            self.deform_offset_out = nn.Conv2d(
                keypts_out_dim, deform_offset_dim, 1, 1, 0)
            self.cls_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            self.cls_out = nn.Conv2d(
                feat_channels*3, cls_out_channels, 1, 1, 0)
            self.bbox_param_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            self.bbox_param_out = nn.Conv2d(
                feat_channels*3, 4, 1, 1, 0)
            self.kpt_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            self.kpt_out = nn.Conv2d(
                feat_channels*3, keypts_out_dim, 1, 1, 0)
        else:
            self.cls_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.cls_out = nn.Conv2d(
                feat_channels, cls_out_channels, 1, 1, 0)
            self.bbox_param_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.bbox_param_out = nn.Conv2d(
                feat_channels, 4, 1, 1, 0)
            self.kpt_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.kpt_out = nn.Conv2d(
                feat_channels, keypts_out_dim, 1, 1, 0)
        # BUGFIX: points2bbox() reads self.moment_transfer for the 'moment'
        # transform, but this module never registered it (only
        # CascadeKpDetHead did), so transform_method='moment' crashed with
        # AttributeError.  Register it here, mirroring CascadeKpDetHead.
        if self.transform_method == 'moment':
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
        # initialize weights
        bias_cls = bias_init_with_prob(0.01)
        if self.deform_conv:
            normal_init(self.deform_offset_out, std=0.01)
        else:
            normal_init(self.cls_conv, std=0.01)
            normal_init(self.bbox_param_conv, std=0.01)
            normal_init(self.kpt_conv, std=0.01)
        normal_init(self.cls_out, std=0.01, bias=bias_cls)
        normal_init(self.bbox_param_out, std=0.01)
        normal_init(self.kpt_out, std=0.01)
    def forward(self, cls_feat, pts_feat, kpt_offset_prev=None):
        """Run the stage.

        Args:
            cls_feat: classification feature map.
            pts_feat: localization feature map.
            kpt_offset_prev: previous stage's keypoint offsets; used only
                when ``deform_conv`` is set.

        Returns:
            tuple: (cls_score_map, bbox_offset, kpt_offset).
        """
        if self.deform_conv:
            deform_offset = self.deform_offset_out(kpt_offset_prev)
            cls_dfm_feat = self.relu(
                self.cls_dfm_block(cls_feat, deform_offset))
            cls_score_map = self.cls_out(cls_dfm_feat)
            kpt_dfm_feat = self.relu(
                self.kpt_dfm_block(pts_feat, deform_offset))
            kpt_offset = self.kpt_out(kpt_dfm_feat)
            # Residual refinement on top of the previous stage's prediction.
            kpt_offset = kpt_offset + kpt_offset_prev.detach()
            bbox_param_dfm_feat = self.relu(
                self.bbox_param_dfm_block(pts_feat, deform_offset))
            bbox_param = self.bbox_param_out(bbox_param_dfm_feat)
            bbox_offset = self.points2bbox(
                kpt_offset.detach(), tranfer_param=bbox_param)
        else:
            cls_score_map = self.cls_out(self.relu(self.cls_conv(cls_feat)))
            kpt_offset = self.kpt_out(self.relu(
                self.kpt_conv(pts_feat)))
            bbox_param = self.bbox_param_out(self.relu(
                self.bbox_param_conv(pts_feat)))
            bbox_offset = self.points2bbox(
                kpt_offset.detach(), tranfer_param=bbox_param)
        return cls_score_map, bbox_offset, kpt_offset
    def points2bbox(self, pts, y_first=True, tranfer_param=None):
        """Convert point sets into bounding boxes.

        :param pts: input point sets (fields); each set is 2n scalars.
        :param y_first: if True the point set is [y1, x1, ... yn, xn],
            otherwise [x1, y1, ... xn, yn].
        :param tranfer_param: optional [B, 4, H, W] tensor used by the
            '*_param' transforms; channels are (translate_x, translate_y,
            scale_x, scale_y).  NOTE(review): parameter keeps the original
            'tranfer' spelling because callers pass it by keyword.
        :return: each point set converted to a bbox [x1, y1, x2, y2].
        """
        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
                                                                      ...]
        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
                                                                      ...]
        if self.transform_method == 'minmax':
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'moment':
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            # Blend the learnable transfer with its detached copy to damp
            # its gradient by moment_mul.
            moment_transfer = (self.moment_transfer * self.moment_mul) + (
                self.moment_transfer.detach() * (1 - self.moment_mul))
            moment_width_transfer = moment_transfer[0]
            moment_height_transfer = moment_transfer[1]
            half_width = pts_x_std * torch.exp(moment_width_transfer)
            half_height = pts_y_std * torch.exp(moment_height_transfer)
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        elif self.transform_method == 'minmax_param':
            assert tranfer_param is not None
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_top = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox_center_x = (bbox_left + bbox_right) / 2
            bbox_center_y = (bbox_top + bbox_bottom) / 2
            tranfer_param = (tranfer_param * self.moment_mul) + (
                tranfer_param.detach() * (1 - self.moment_mul))
            half_width = (bbox_center_x - bbox_left) * torch.exp(
                tranfer_param[:, 0:1, :, :])
            half_height = (bbox_center_y - bbox_top) * torch.exp(
                tranfer_param[:, 1:2, :, :])
            bbox_center_x = bbox_center_x + tranfer_param[:, 2:3, :, :]
            bbox_center_y = bbox_center_y + tranfer_param[:, 3:4, :, :]
            bbox = torch.cat([
                bbox_center_x - half_width, bbox_center_y - half_height,
                bbox_center_x + half_width, bbox_center_y + half_height
            ],
                             dim=1)
        elif self.transform_method == 'moment_param':
            assert tranfer_param is not None
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            tranfer_param = (tranfer_param * self.moment_mul) + (
                tranfer_param.detach() * (1 - self.moment_mul))
            pts_x_mean = pts_x_mean + tranfer_param[:, 0:1, :, :]
            pts_y_mean = pts_y_mean + tranfer_param[:, 1:2, :, :]
            half_width = pts_x_std * torch.exp(tranfer_param[:, 2:3, :, :])
            half_height = pts_y_std * torch.exp(tranfer_param[:, 3:4, :, :])
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        else:
            raise NotImplementedError
        return bbox
@HEADS.register_module
class CascadeKpDetHead(nn.Module):
"""RepPoint head.
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
point_feat_channels (int): Number of channels of points features.
stacked_convs (int): How many conv layers are used.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Iterable): points strides.
transform_method (str): The methods to transform RepPoints to bbox.
""" # noqa: W605
    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 point_feat_channels=256,
                 stacked_convs=3,
                 num_reppts=9,
                 num_keypts=17,
                 gradient_mul=0.1,
                 # NOTE(review): mutable default argument; shared across
                 # instances if ever mutated in place.
                 point_strides=[8, 16, 32, 64, 128],
                 point_base_scale=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 # Per-stage losses: *_1 and *_2 are the intermediate cascade
                 # stages (weight 0.5), *_3 is the final stage (weight 1.0).
                 loss_cls_1=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=0.5),
                 loss_cls_2=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=0.5),
                 loss_cls_3=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox_1=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_2=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_3=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 loss_kpt_1=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_kpt_2=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_kpt_3=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 use_grid_points=False,
                 center_init=True,
                 transform_method='moment',
                 moment_mul=0.01):
        """Build the cascade keypoint-detection head (see class docstring)."""
        super().__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.feat_channels = feat_channels
        self.point_feat_channels = point_feat_channels
        self.stacked_convs = stacked_convs
        self.num_keypts = num_keypts
        # NOTE(review): the num_reppts argument is ignored; the head always
        # uses the fixed 3x3 + 5x5 + 7x7 multi-column layout (9 + 25 + 49
        # points).  Presumably intentional -- confirm before relying on it.
        self.num_reppts = 9+25+49
        self.gradient_mul = gradient_mul
        self.point_base_scale = point_base_scale
        self.point_strides = point_strides
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Classification output layout is driven by the *final* stage's loss.
        self.use_sigmoid_cls = loss_cls_3.get('use_sigmoid', False)
        self.sampling = loss_cls_3['type'] not in ['FocalLoss']
        self.loss_cls_1 = build_loss(loss_cls_1)
        self.loss_cls_2 = build_loss(loss_cls_2)
        self.loss_cls_3 = build_loss(loss_cls_3)
        self.loss_bbox_1 = build_loss(loss_bbox_1)
        self.loss_bbox_2 = build_loss(loss_bbox_2)
        self.loss_bbox_3 = build_loss(loss_bbox_3)
        self.loss_kpt_1 = build_loss(loss_kpt_1)
        self.loss_kpt_2 = build_loss(loss_kpt_2)
        self.loss_kpt_3 = build_loss(loss_kpt_3)
        self.use_grid_points = use_grid_points
        self.center_init = center_init
        self.transform_method = transform_method
        # Learnable width/height transfer used only by the 'moment' transform.
        if self.transform_method == 'moment':
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
            self.moment_mul = moment_mul
        # Sigmoid classification drops the background channel.
        if self.use_sigmoid_cls:
            self.cls_out_channels = self.num_classes - 1
        else:
            self.cls_out_channels = self.num_classes
        self.point_generators = [PointGenerator() for _ in self.point_strides]
        self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=False)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
# stage | |
# src/fortiel.py
#!/usr/bin/env python3.9
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines, eval-used
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-= =-=-=-=-= #
# =-=-=-=-= ,------. ,--. ,--. ,--. =-=-=-=-= #
# =-=-=-=-= | .---',---. ,--.--.,-' '-.`--' ,---. | | =-=-=-=-= #
# =-=-=-=-= | `--,| .-. || .--''-. .-',--.| .-. :| | =-=-=-=-= #
# =-=-=-=-= | |` ' '-' '| | | | | |\ --.| | =-=-=-=-= #
# =-=-=-=-= `--' `---' `--' `--' `--' `----'`--' =-=-=-=-= #
# =-=-=-=-= =-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-= =-=-=-=-= #
# =-= =-= #
# = = #
# #
# Copyright (C) 2021 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files #
# (the "Software"), to deal in the Software without restriction, including #
# without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit #
# persons to whom the Software is furnished to do so, subject to the #
# following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS #
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. #
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY #
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, #
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE #
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# #
# = = #
# =-= =-= #
# =-=-=-=-= =-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
"""
Fortiel language translator and executor.
"""
import re
import argparse
import sys
from os import path
from abc import ABC
from dataclasses import dataclass, field
from keyword import iskeyword as is_reserved
from typing import (
cast, final,
Iterable, List, Set, Dict, Tuple, Any, Union,
Final, Optional, Callable, Literal, Pattern, Match)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Helper Routines =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _make_name(name: str) -> str:
    """Collapse *name* into a single lower-case word (all whitespace removed)."""
    return ''.join(name.split()).lower()
def _compile_re(pattern: str, dotall: bool = False) -> Pattern[str]:
    """Compile *pattern* with the standard Fortiel flags.

    Always case-insensitive, multi-line and verbose; DOTALL is opt-in.
    """
    base_flags = re.IGNORECASE | re.MULTILINE | re.VERBOSE
    return re.compile(pattern, base_flags | (re.DOTALL if dotall else 0))
def _find_duplicate(strings: Iterable[str]) -> Optional[str]:
    """Return the first string that repeats an earlier one, or None."""
    seen: Set[str] = set()
    for candidate in strings:
        if candidate in seen:
            return candidate
        seen.add(candidate)
    return None
def _find_file(file_path: str, dir_paths: List[str]) -> Optional[str]:
    """Resolve *file_path* against CWD, *dir_paths*, then this script's dir."""
    # Expand '~' in the bare path first; an absolute result short-circuits
    # the later os.path.join calls, matching a user-supplied absolute path.
    file_path = path.expanduser(file_path)
    candidates = [file_path] + [
        path.expanduser(path.join(dir_path, file_path))
        for dir_path in dir_paths]
    for candidate in candidates:
        if path.exists(candidate):
            return path.abspath(candidate)
    # Fall back to the directory containing this script.
    fallback = path.join(path.abspath(path.dirname(__file__)), file_path)
    return fallback if path.exists(fallback) else None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Exceptions and Messages =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
class FortielError(Exception):
    """Base class for Fortiel compilation and execution errors."""

    def __init__(self, message: str, file_path: str, line_number: int) -> None:
        super().__init__()
        self.message: str = message
        self.file_path: str = file_path
        self.line_number: int = line_number

    def __str__(self) -> str:
        # Render in GFortran's error format so tooling can parse the location.
        location = f'{self.file_path}:{self.line_number}:1:'
        return f'{location}\n\nFatal Error: {self.message}'
@final
class FortielSyntaxError(FortielError):
    """Error raised when Fortiel directives cannot be parsed."""

    def __init__(self, message: str, file_path: str, line_number: int) -> None:
        # Tag the message so diagnostics identify the failing phase.
        tagged_message = f'Fortiel syntax error: {message}'
        super().__init__(tagged_message, file_path, line_number)
@final
class FortielRuntimeError(FortielError):
    """Error raised while executing Fortiel directives."""

    def __init__(self, message: str, file_path: str, line_number: int) -> None:
        # Tag the message so diagnostics identify the failing phase.
        tagged_message = f'Fortiel runtime error: {message}'
        super().__init__(tagged_message, file_path, line_number)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Options =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
@final
@dataclass
class FortielOptions:
    """Preprocessor options.

    Refactored as a data class (resolving the old TODO): same zero-argument
    constructor and attributes, with per-instance mutable defaults supplied
    via field(default_factory=...).
    """
    # NAME[=VALUE] macro definitions supplied on the command line.
    defines: List[str] = field(default_factory=list)
    # Directories searched when resolving USE'd files.
    include_paths: List[str] = field(default_factory=list)
    # Style of emitted line markers.
    line_marker_format: Literal['fpp', 'cpp', 'none'] = 'fpp'
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Scanner and Directives Parser =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
@dataclass
class FortielNode(ABC):
    """Fortiel syntax tree node."""
    # Source location the node was parsed from, used in diagnostics.
    file_path: str
    line_number: int
@final
@dataclass
class FortielTree:
    """Fortiel syntax tree."""
    # Path of the translation unit this tree was parsed from.
    file_path: str
    # Top-level statements in source order.
    root_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielLineListNode(FortielNode):
    """The list of code lines syntax tree node."""
    # Consecutive plain (non-directive) source lines.
    lines: List[str] = field(default_factory=list)
@final
@dataclass
class FortielUseNode(FortielNode):
    """The USE directive syntax tree node."""
    # Path of the file to import, as written in the directive.
    imported_file_path: str
@final
@dataclass
class FortielLetNode(FortielNode):
    """The LET directive syntax tree node."""
    name: str
    # None for plain variables; argument text or parsed name list for
    # function-style LET name(arg, ...) definitions.
    arguments: Union[str, List[str], None]
    value_expression: str
@final
@dataclass
class FortielDelNode(FortielNode):
    """The DEL directive syntax tree node."""
    # A single name or a list of names to undefine.
    names: Union[str, List[str]]
@final
@dataclass
class FortielElifNode(FortielNode):
    """The ELSE IF directive syntax tree node."""
    condition_expression: str
    # Nodes belonging to this branch.
    then_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielIfNode(FortielNode):
    """The IF/ELSE IF/ELSE/END IF directive syntax tree node."""
    condition_expression: str
    then_nodes: List[FortielNode] = field(default_factory=list)
    # Ordered ELSE IF branches, followed by the optional ELSE branch.
    elif_nodes: List[FortielElifNode] = field(default_factory=list)
    else_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielDoNode(FortielNode):
    """The DO/END DO directive syntax tree node."""
    index_name: str
    # Range expression text, evaluated at execution time.
    ranges_expression: str
    loop_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielForNode(FortielNode):
    """The FOR/END FOR directive syntax tree node."""
    # Loop variable name(s) bound on each iteration.
    index_names: Union[str, List[str], None]
    iterable_expression: str
    loop_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielCallSegmentNode(FortielNode):
    """The call segment syntax tree node."""
    # Leading whitespace, preserved from the source line.
    spaces_before: str
    name: str
    argument: str
@final
@dataclass
class FortielPatternNode(FortielNode):
    """The PATTERN directive syntax tree node."""
    # Pattern source text, or the compiled regular expression.
    pattern: Union[str, Pattern[str]]
    match_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielSectionNode(FortielNode):
    """The SECTION directive syntax tree node."""
    name: str
    # Set for 'SECTION ONCE' -- presumably restricts the section to a
    # single use per call; confirm in the executor.
    once: bool
    pattern_nodes: List[FortielPatternNode] = field(default_factory=list)
@final
@dataclass
class FortielMacroNode(FortielNode):
    """The MACRO/END MACRO directive syntax tree node."""
    name: str
    # Patterns applied to the macro's own argument.
    pattern_nodes: List[FortielPatternNode] = field(default_factory=list)
    # Construct-only parts: named sections and the FINALLY tail.
    section_nodes: List[FortielSectionNode] = field(default_factory=list)
    finally_nodes: List[FortielNode] = field(default_factory=list)
    @property
    def is_construct(self) -> bool:
        """Is current macro a construct?

        A macro counts as a construct when it declares any sections or
        FINALLY nodes.
        """
        return len(self.section_nodes) > 0 or len(self.finally_nodes) > 0
    @property
    def section_names(self) -> List[str]:
        """List of the section names."""
        return [node.name for node in self.section_nodes]
@final
class FortielCallNode(FortielNode):
    """The call directive syntax tree node.

    Built from the initial call segment; captured nodes and call sections
    are filled in later while the surrounding construct is parsed.
    """
    # TODO: refactor as data class.

    def __init__(self, node: FortielCallSegmentNode) -> None:
        super().__init__(node.file_path, node.line_number)
        # Copy the lexical pieces of the originating segment.
        self.name: str = node.name
        self.argument: str = node.argument
        self.spaces_before: str = node.spaces_before
        # Populated during construct parsing.
        self.captured_nodes: List[FortielNode] = []
        self.call_section_nodes: List[FortielCallSectionNode] = []
@final
class FortielCallSectionNode(FortielNode):
    """The call directive section syntax tree node.

    Created from a section segment inside a construct call; nodes captured
    until the next segment are appended afterwards.
    """
    # TODO: refactor as data class.

    def __init__(self, node: FortielCallSegmentNode) -> None:
        super().__init__(node.file_path, node.line_number)
        # Lexical pieces of the originating segment.
        self.name: str = node.name
        self.argument: str = node.argument
        # Populated while the construct body is parsed.
        self.captured_nodes: List[FortielNode] = []
_FORTIEL_DIRECTIVE: Final = _compile_re(r'^\s*\#[@$]\s*(?P<directive>.*)?$')
_FORTIEL_USE: Final = _compile_re(
r'^USE\s+(?P<path>(?:\"[^\"]+\") | (?:\'[^\']+\') | (?:\<[^\>]+\>))$')
_FORTIEL_LET: Final = _compile_re(r'''
^LET\s+(?P<name>[A-Z_]\w*)\s*
(?: \(\s* (?P<arguments>
(?:\*\s*){0,2}[A-Z_]\w*
(?:\s*,\s*(?:\*\s*){0,2}[A-Z_]\w* )* ) \s*\) )?
\s*=\s*(?P<value_expression>.*)$
''')
_FORTIEL_DEFINE: Final = _compile_re(r'^DEFINE\s+(?P<name>[A-Z_]\w*)(?P<segment>.*)$')
_FORTIEL_DEL: Final = _compile_re(r'^DEL\s+(?P<names>[A-Z_]\w*(?:\s*,\s*[A-Z_]\w*)*)$')
_FORTIEL_IF: Final = _compile_re(r'^IF\s*(?P<condition_expression>.+)$')
_FORTIEL_ELIF: Final = _compile_re(r'^ELSE\s*IF\s*(?P<condition_expression>.+)$')
_FORTIEL_ELSE: Final = _compile_re(r'^ELSE$')
_FORTIEL_END_IF: Final = _compile_re(r'^END\s*IF$')
_FORTIEL_IFDEF: Final = _compile_re(r'^IFDEF\s+(?P<name>[A-Z_]\w*)$')
_FORTIEL_IFNDEF: Final = _compile_re(r'^IFNDEF\s+(?P<name>[A-Z_]\w*)$')
_FORTIEL_DO: Final = _compile_re(
r'^DO\s+(?P<index_name>[A-Z_]\w*)\s*=\s*(?P<ranges_expression>.*)$')
_FORTIEL_END_DO: Final = _compile_re(r'^END\s*DO$')
_FORTIEL_FOR: Final = _compile_re(
r'^FOR\s+(?P<index_names>[A-Z_]\w*(?:\s*,\s*[A-Z_]\w*)*)\s*IN\s*(?P<iterable_expression>.*)$')
_FORTIEL_END_FOR: Final = _compile_re(r'^END\s*FOR$')
_FORTIEL_CALL: Final = _compile_re(
r'^(?P<spaces>\s*)\@(?P<name>(?:END\s*|ELSE\s*)?[A-Z]\w*)\b(?P<argument>[^!]*)(\s*!.*)?$')
_FORTIEL_MACRO: Final = _compile_re(r'^MACRO\s+(?P<name>[A-Z]\w*)(\s+(?P<pattern>.*))?$')
_FORTIEL_PATTERN: Final = _compile_re(r'^PATTERN\s+(?P<pattern>.*)$')
_FORTIEL_SECTION: Final = _compile_re(
r'^SECTION\s+(?P<once>ONCE\s+)?(?P<name>[A-Z]\w*)(?:\s+(?P<pattern>.*))?$')
_FORTIEL_FINALLY: Final = _compile_re(r'^FINALLY$')
_FORTIEL_END_MACRO: Final = _compile_re(r'^END\s*MACRO$')
_BUILTIN_HEADERS = {'.f90': 'tiel/syntax.fd'}
class FortielParser:
"""Fortiel syntax tree parser."""
def __init__(self, file_path: str, lines: List[str]) -> None:
self._file_path: str = file_path
self._lines: List[str] = lines
self._line: str = self._lines[0]
self._multiline: str = self._line
self._line_index: int = 0
self._line_number: int = 1
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _matches_end(self) -> bool:
return self._line_index >= len(self._lines)
    def _advance_line(self) -> None:
        """Advance to the next line, parsing the line continuations.

        Maintains two views of the current logical line:
        - self._line: continuation markers stripped and fragments joined
          with single spaces (used for directive matching);
        - self._multiline: the original fragments joined with newlines
          (used when emitting plain code lines).
        """
        self._line_index += 1
        self._line_number += 1
        if self._matches_end():
            # Past the last line: expose empty strings so callers see EOF.
            self._line = self._multiline = ''
        else:
            self._line = self._multiline = self._lines[self._line_index].rstrip()
            # Parse line continuations: a trailing '&' pulls in the next
            # line; an optional leading '&' on that line is consumed too.
            while self._line.endswith('&'):
                self._line_index += 1
                self._line_number += 1
                if self._matches_end():
                    message = 'unexpected end of file in continuation lines'
                    raise FortielSyntaxError(message, self._file_path, self._line_number)
                # Update merged line.
                next_line = self._lines[self._line_index].rstrip()
                self._multiline += '\n' + next_line
                # Update line.
                next_line = next_line.lstrip()
                if next_line.startswith('&'):
                    next_line = next_line.removeprefix('&').lstrip()
                self._line = self._line.removesuffix('&').rstrip() + ' ' + next_line
def _matches_line(self, *patterns: Pattern[str]) -> Optional[Match[str]]:
if self._matches_end():
message = 'unexpected end of file'
raise FortielSyntaxError(message, self._file_path, self._line_number)
for pattern in patterns:
match = pattern.match(self._line)
if match is not None:
return match
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def parse(self) -> FortielTree:
"""Parse the source lines."""
tree = FortielTree(self._file_path)
# Add builtin headers based on file extension.
_, file_ext = path.splitext(self._file_path)
builtins_path = _BUILTIN_HEADERS.get(file_ext.lower())
if builtins_path is not None:
use_builtins_node = FortielUseNode(self._file_path, 0, builtins_path)
tree.root_nodes.append(use_builtins_node)
# Parse file contents.
while not self._matches_end():
tree.root_nodes.append(self._parse_statement())
return tree
def _parse_statement(self) -> FortielNode:
"""Parse a directive or a line list."""
if self._matches_line(_FORTIEL_DIRECTIVE):
return self._parse_directive()
if self._matches_line(_FORTIEL_CALL):
return self._parse_call_segment()
return self._parse_line_list()
def _parse_line_list(self) -> FortielLineListNode:
"""Parse a line list."""
node = FortielLineListNode(self._file_path, self._line_number)
while True:
node.lines.append(self._multiline)
self._advance_line()
if self._matches_end() or self._matches_line(_FORTIEL_DIRECTIVE, _FORTIEL_CALL):
break
return node
# | |
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 <NAME> <<EMAIL>>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================
"""Representation of a single domain.
Includes support for domain construction, using
open-ended configurations.
Author: <NAME> <<EMAIL>>
"""
import logging
import time
import threading
import thread
import re
import copy
import os
import stat
import shutil
import traceback
from types import StringTypes
import xen.lowlevel.xc
from xen.util import asserts, auxbin, mkdir
from xen.util.blkif import parse_uname
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util import mkdir
from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
append_default_pci_opts, \
pci_dict_to_bdf_str, pci_dict_to_xc_str, \
pci_convert_sxp_to_dict, pci_convert_dict_to_sxp, \
pci_dict_cmp, PCI_DEVFN, PCI_SLOT, PCI_FUNC, parse_hex
from xen.xend import balloon, sxp, uuid, image, arch
from xen.xend import XendOptions, XendNode, XendConfig
from xen.xend.XendConfig import scrub_password
from xen.xend.XendBootloader import bootloader, bootloader_tidy
from xen.xend.XendError import XendError, VmError
from xen.xend.XendDevices import XendDevices
from xen.xend.XendTask import XendTask
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
from xen.xend.XendAPIConstants import *
from xen.xend.XendCPUPool import XendCPUPool
from xen.xend.server.DevConstants import xenbusState
from xen.xend.server.BlktapController import TapdiskController
from xen.xend.XendVMMetrics import XendVMMetrics
from xen.xend import XendAPIStore
from xen.xend.XendPPCI import XendPPCI
from xen.xend.XendDPCI import XendDPCI
from xen.xend.XendPSCSI import XendPSCSI
from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA
# Timeout used by the migration path (presumably seconds -- usage is not
# visible in this chunk; confirm against the migration code).
MIGRATE_TIMEOUT = 30.0
# Device name handed to the bootloader code elsewhere in this module.
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
# Module-level handles shared by every function below.
xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()
log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Creates and starts a VM using the supplied configuration.
    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    # Imported here rather than at module level, avoiding a circular import.
    from xen.xend import XendDomain
    domconfig = XendConfig.XendConfig(sxp_obj = config)
    # Refuse to create a second *running* domain with the same name or UUID;
    # an entry whose domid is None is not running and may be superseded.
    othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if othervm is None or othervm.domid is None:
        othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if othervm is not None and othervm.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Best-effort cleanup; the original exception is re-raised below.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Creates and starts a VM using the supplied configuration.
    @param config_dict: A configuration dictionary (Xen-API style).
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))
    vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        vm.start()
    except:
        # Best-effort cleanup; the original exception is re-raised below.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.
    @param info: Parsed configuration
    @type info: dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
    assert not info['dying']
    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False
    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)
    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))
    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    # XendError('reinit') is raised/caught purely as a forward jump to the
    # recovery path below ("goto" emulation).
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')
        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')
        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)
    # NOTE(review): if the first xstransact.Read itself raised XendError,
    # vmpath would be unbound here -- confirm Read only returns None/str.
    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)
    if needs_reinitialising:
        # Rebuild the xenstore entries from our own configuration.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()
    vm.image = image.create(vm, vm.info)
    vm.image.recreate()
    vm._registerWatches()
    vm.refreshShutdown(xeninfo)
    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)
    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.
    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
    # resume = True tells the constructor the domain state is being
    # restored rather than built from scratch.
    vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                        resume = True)
    try:
        vm.resume()
        return vm
    except:
        # Clean up the half-restored domain, then propagate the failure.
        vm.destroy()
        raise
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that have not
    yet been started.
    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object
    @rtype: XendDomainInfo
    @return: A dormant (not running) XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    vm = XendDomainInfo(domconfig)
    return vm
def domain_by_name(name):
    """Get domain by name.
    @param name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    # Imported here rather than at module level, avoiding a circular import.
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Get a shutdown reason from a code.
    @param code: shutdown code
    @type code: int
    @return: shutdown reason
    @rtype: string
    """
    # Unknown codes map to "?" rather than raising.
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type dom: int
    @return: info dictionary, or None when the domain does not exist
    @rtype: dictionary or None
    """
    try:
        # Ask xc for a single domain record starting at id 'dom'.
        domlist = xc.domain_getinfo(dom, 1)
        # domain_getinfo returns the first domain with id >= dom, so check
        # that the record really is the requested domain.
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception as err:
        # Fix: 'except Exception, err' is Python-2-only syntax; the 'as'
        # form works on Python 2.6+ and Python 3 with identical behavior.
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
from xen.xend.server.pciif import parse_pci_name, PciDevice,\
get_assigned_pci_devices, get_all_assigned_pci_devices
def do_FLR(domid, is_hvm):
    """Perform a Function Level Reset on every PCI device assigned to a domain.

    @param domid: id of the domain whose assigned PCI devices are reset
    @param is_hvm: whether the domain is an HVM guest
    @raise VmError: if an assigned device cannot be located or parsed
    """
    dev_str_list = get_assigned_pci_devices(domid)

    for dev_str in dev_str_list:
        try:
            dev = PciDevice(parse_pci_name(dev_str))
        except Exception as e:
            # Fix: replaced Python-2-only 'except Exception, e' syntax and
            # the "it's" typo in the error message.
            raise VmError("pci: failed to locate device and "
                          "parse its resources - " + str(e))
        dev.do_FLR(is_hvm, xoptions.get_pci_dev_assign_strict_check())
class XendDomainInfo:
"""An object represents a domain.
@TODO: try to unify dom and domid, they mean the same thing, but
xc refers to it as dom, and everywhere else, including
xenstore it is domid. The best way is to change xc's
python interface.
@ivar info: Parsed configuration
@type info: dictionary
@ivar domid: Domain ID (if VM has started)
@type domid: int or None
@ivar paused_by_admin: Is this Domain paused by command or API
@type paused_by_admin: bool
@ivar guest_bitsize: the bitsize of guest
@type guest_bitsize: int or None
@ivar alloc_mem: the memory domain allocated when booting
@type alloc_mem: int or None
@ivar vmpath: XenStore path to this VM.
@type vmpath: string
@ivar dompath: XenStore path to this Domain.
@type dompath: string
@ivar image: Reference to the VM Image.
@type image: xen.xend.image.ImageHandler
@ivar store_port: event channel to xenstored
@type store_port: int
@ivar console_port: event channel to xenconsoled
@type console_port: int
@ivar store_mfn: xenstored mfn
@type store_mfn: int
@ivar console_mfn: xenconsoled mfn
@type console_mfn: int
@ivar notes: OS image notes
@type notes: dictionary
@ivar vmWatch: reference to a watch on the xenstored vmpath
@type vmWatch: xen.xend.xenstore.xswatch
@ivar shutdownWatch: reference to watch on the xenstored domain shutdown
@type shutdownWatch: xen.xend.xenstore.xswatch
@ivar shutdownStartTime: UNIX Time when domain started shutting down.
@type shutdownStartTime: float or None
@ivar restart_in_progress: Is a domain restart thread running?
@type restart_in_progress: bool
# @ivar state: Domain state
# @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
@ivar state_updated: lock for self.state
@type state_updated: threading.Condition
@ivar refresh_shutdown_lock: lock for polling shutdown state
@type refresh_shutdown_lock: threading.Condition
@ivar _deviceControllers: | |
<reponame>joycenerd/Reinforcement_Learning_2021
import os
from time import sleep
import numpy as np
from scipy.optimize import minimize
from tqdm import tqdm
import gym
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from torch.distributions import MultivariateNormal, Categorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
from mpo.actor import ActorContinuous, ActorDiscrete
from mpo.critic import CriticContinuous, CriticDiscrete
from mpo.replaybuffer import ReplayBuffer
import matplotlib.pyplot as plt
from matplotlib import animation
def bt(m):
    """Batched transpose: swap the last two dimensions of a tensor."""
    return m.transpose(-2, -1)
def btr(m):
    """Batched trace: sum the diagonal of the last two dimensions."""
    diag = m.diagonal(dim1=-2, dim2=-1)
    return diag.sum(-1)
def gaussian_kl(μi, μ, Ai, A):
    """
    Decoupled KL divergence between two multivariate Gaussians:
        C_μ = KL(f(x|μi,Σi) || f(x|μ,Σi))   -- mean term
        C_Σ = KL(f(x|μi,Σi) || f(x|μi,Σ))   -- covariance term
    where Σi = Ai·Aiᵀ and Σ = A·Aᵀ (A, Ai are Cholesky factors).

    :param μi: (B, n)
    :param μ: (B, n)
    :param Ai: (B, n, n)
    :param A: (B, n, n)
    :return: C_μ, C_Σ (scalars) and the mean determinants of Σi and Σ
    ref : https://stanford.edu/~jduchi/projects/general_notes.pdf page.13
    """
    n = A.size(-1)
    mean_i = μi.unsqueeze(-1)                      # (B, n, 1)
    mean = μ.unsqueeze(-1)                         # (B, n, 1)
    cov_i = Ai @ Ai.transpose(-2, -1)              # (B, n, n)
    cov = A @ A.transpose(-2, -1)                  # (B, n, n)
    # determinants can come out negative due to numerical error, so clamp
    # (https://github.com/daisatojp/mpo/issues/11)
    det_i = torch.clamp_min(cov_i.det(), 1e-6)     # (B,)
    det = torch.clamp_min(cov.det(), 1e-6)         # (B,)
    prec_i = cov_i.inverse()                       # (B, n, n)
    prec = cov.inverse()                           # (B, n, n)
    diff = mean - mean_i                           # (B, n, 1)
    inner_mean = (diff.transpose(-2, -1) @ prec_i @ diff).squeeze()    # (B,)
    trace_term = (prec @ cov_i).diagonal(dim1=-2, dim2=-1).sum(-1)     # (B,)
    inner_cov = torch.log(det / det_i) - n + trace_term                # (B,)
    return (0.5 * inner_mean.mean(), 0.5 * inner_cov.mean(),
            det_i.mean(), det.mean())
def categorical_kl(p1, p2):
    """
    Mean KL divergence between two batches of Categorical distributions.

    :param p1: (B, D) probabilities
    :param p2: (B, D) probabilities
    """
    # clamp both inputs away from zero so the ratio/log never divides by
    # or takes the log of zero
    safe1 = torch.clamp_min(p1, 0.0001)
    safe2 = torch.clamp_min(p2, 0.0001)
    kl_per_sample = (safe1 * (safe1 / safe2).log()).sum(dim=-1)
    return kl_per_sample.mean()
def save_frames_as_gif(frames, path='./', filename='gym_animation.gif'):
    """Render a list of RGB frames to an animated GIF.

    :param frames: list of HxWxC frame arrays (e.g. from env.render(mode='rgb_array'))
    :param path: directory prefix the filename is appended to (keep the trailing '/')
    :param filename: name of the GIF file to write
    """
    # Mess with this to change frame size: at dpi=72 one frame pixel maps
    # to roughly one output pixel.
    fig = plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)

    patch = plt.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        # swap frame i into the existing image artist; FuncAnimation redraws it
        patch.set_data(frames[i])

    anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)
    anim.save(path + filename, writer='imagemagick', fps=60)
    # Fix: close the figure so repeated calls do not leak open matplotlib
    # figures (matplotlib keeps every un-closed figure alive).
    plt.close(fig)
class MPO(object):
"""
Maximum A Posteriori Policy Optimization (MPO)
:param device:
:param env: gym environment
:param dual_constraint:
(float) hard constraint of the dual formulation in the E-step
correspond to [2] p.4 ε
:param kl_mean_constraint:
(float) hard constraint of the mean in the M-step
correspond to [2] p.6 ε_μ for continuous action space
:param kl_var_constraint:
(float) hard constraint of the covariance in the M-step
correspond to [2] p.6 ε_Σ for continuous action space
:param kl_constraint:
(float) hard constraint in the M-step
correspond to [2] p.6 ε_π for discrete action space
:param discount_factor: (float) discount factor used in Policy Evaluation
:param alpha_scale: (float) scaling factor of the lagrangian multiplier in the M-step
:param sample_episode_num: the number of sampled episodes
:param sample_episode_maxstep: maximum sample steps of an episode
:param sample_action_num:
:param batch_size: (int) size of the sampled mini-batch
:param episode_rerun_num:
:param mstep_iteration_num: (int) the number of iterations of the M-step
:param evaluate_episode_maxstep: maximum evaluate steps of an episode
[1] https://arxiv.org/pdf/1806.06920.pdf
[2] https://arxiv.org/pdf/1812.02256.pdf
"""
def __init__(self,
             device,
             env,
             log_dir,
             dual_constraint=0.1,
             kl_mean_constraint=0.01,
             kl_var_constraint=0.0001,
             kl_constraint=0.01,
             discount_factor=0.99,
             alpha_mean_scale=1.0,
             alpha_var_scale=100.0,
             alpha_scale=10.0,
             alpha_mean_max=0.1,
             alpha_var_max=10.0,
             alpha_max=1.0,
             sample_episode_num=30,
             sample_episode_maxstep=200,
             sample_action_num=64,
             batch_size=256,
             episode_rerun_num=3,
             mstep_iteration_num=5,
             evaluate_period=10,
             evaluate_episode_num=100,
             evaluate_episode_maxstep=200):
    """Configure the MPO agent: hyper-parameters, actor/critic networks
    (continuous or discrete variants), their frozen target copies,
    optimizers, Lagrange multipliers and the replay buffer.
    Parameter meanings are documented in the class docstring."""
    self.device = device
    self.env = env
    self.log_dir = log_dir
    # continuous vs. discrete is inferred from the action-space dtype
    if self.env.action_space.dtype == np.float32:
        self.continuous_action_space = True
    else:  # discrete action space
        self.continuous_action_space = False
    # the number of dimensions of state space
    self.ds = env.observation_space.shape[0]
    # the number of dimensions of action space
    if self.continuous_action_space:
        self.da = env.action_space.shape[0]
    else:  # discrete action space
        self.da = env.action_space.n
    # constraint thresholds (ε) and Lagrange-multiplier scales/caps (α)
    self.ε_dual = dual_constraint
    self.ε_kl_μ = kl_mean_constraint
    self.ε_kl_Σ = kl_var_constraint
    self.ε_kl = kl_constraint
    self.γ = discount_factor
    self.α_μ_scale = alpha_mean_scale
    self.α_Σ_scale = alpha_var_scale
    self.α_scale = alpha_scale
    self.α_μ_max = alpha_mean_max
    self.α_Σ_max = alpha_var_max
    self.α_max = alpha_max
    self.sample_episode_num = sample_episode_num
    self.sample_episode_maxstep = sample_episode_maxstep
    self.sample_action_num = sample_action_num
    self.batch_size = batch_size
    self.episode_rerun_num = episode_rerun_num
    self.mstep_iteration_num = mstep_iteration_num
    self.evaluate_period = evaluate_period
    self.evaluate_episode_num = evaluate_episode_num
    self.evaluate_episode_maxstep = evaluate_episode_maxstep
    if not self.continuous_action_space:
        # identity matrix used to one-hot-encode all discrete actions at once
        self.A_eye = torch.eye(self.da).to(self.device)
    if self.continuous_action_space:
        self.actor = ActorContinuous(env).to(self.device)
        self.critic = CriticContinuous(env).to(self.device)
        self.target_actor = ActorContinuous(env).to(self.device)
        self.target_critic = CriticContinuous(env).to(self.device)
    else:  # discrete action space
        self.actor = ActorDiscrete(env).to(self.device)
        self.critic = CriticDiscrete(env).to(self.device)
        self.target_actor = ActorDiscrete(env).to(self.device)
        self.target_critic = CriticDiscrete(env).to(self.device)
    # initialize the target networks as frozen copies of the online networks
    for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
        target_param.data.copy_(param.data)
        target_param.requires_grad = False
    for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
        target_param.data.copy_(param.data)
        target_param.requires_grad = False
    self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=5e-4)
    self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
    self.norm_loss_q = nn.MSELoss()
    # η: temperature of the non-parametric E-step distribution (random init)
    self.η = np.random.rand()
    self.α_μ = 0.0  # lagrangian multiplier for continuous action space in the M-step
    self.α_Σ = 0.0  # lagrangian multiplier for continuous action space in the M-step
    self.α = 0.0  # lagrangian multiplier for discrete action space in the M-step
    self.replaybuffer = ReplayBuffer()
    self.max_return_eval = -np.inf
    self.iteration = 1
    self.render = False
def train(self,
iteration_num=1000,
log_dir='log',
model_save_period=10,
render=False):
"""
:param iteration_num:
:param log_dir:
:param model_save_period:
:param render:
"""
self.render = render
log_dir = self.log_dir
# model_save_dir = os.path.join(log_dir, 'model')
model_save_dir = "checkpoints"
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
writer = SummaryWriter(os.path.join("runs", log_dir))
for it in range(self.iteration, iteration_num + 1):
self.__sample_trajectory(self.sample_episode_num)
buff_sz = len(self.replaybuffer)
mean_reward = self.replaybuffer.mean_reward()
mean_return = self.replaybuffer.mean_return()
mean_loss_q = []
mean_loss_p = []
mean_loss_l = []
mean_est_q = []
max_kl_μ = []
max_kl_Σ = []
max_kl = []
mean_Σ_det = []
for r in range(self.episode_rerun_num):
for indices in tqdm(
BatchSampler(
SubsetRandomSampler(range(buff_sz)), self.batch_size, drop_last=True),
desc='training {}/{}'.format(r + 1, self.episode_rerun_num)):
K = len(indices) # the sample number of states
N = self.sample_action_num # the sample number of actions per state
ds = self.ds # the number of state space dimensions
da = self.da # the number of action space dimensions
state_batch, action_batch, next_state_batch, reward_batch = zip(
*[self.replaybuffer[index] for index in indices])
state_batch = torch.from_numpy(np.stack(state_batch)).type(torch.float32).to(self.device) # (K, ds)
action_batch = torch.from_numpy(np.stack(action_batch)).type(torch.float32).to(
self.device) # (K, da) or (K,)
next_state_batch = torch.from_numpy(np.stack(next_state_batch)).type(torch.float32).to(
self.device) # (K, ds)
reward_batch = torch.from_numpy(np.stack(reward_batch)).type(torch.float32).to(self.device) # (K,)
# Policy Evaluation
# [2] 3 Policy Evaluation (Step 1)
loss_q, q = self.__update_critic_td(
state_batch=state_batch,
action_batch=action_batch,
next_state_batch=next_state_batch,
reward_batch=reward_batch,
sample_num=self.sample_action_num
)
mean_loss_q.append(loss_q.item())
mean_est_q.append(q.abs().mean().item())
# E-Step of Policy Improvement
# [2] 4.1 Finding action weights (Step 2)
with torch.no_grad():
if self.continuous_action_space:
# sample N actions per state
b_μ, b_A = self.target_actor.forward(state_batch) # (K,)
b = MultivariateNormal(b_μ, scale_tril=b_A) # (K,)
sampled_actions = b.sample((N,)) # (N, K, da)
expanded_states = state_batch[None, ...].expand(N, -1, -1) # (N, K, ds)
target_q = self.target_critic.forward(
expanded_states.reshape(-1, ds), # (N * K, ds)
sampled_actions.reshape(-1, da) # (N * K, da)
).reshape(N, K) # (N, K)
target_q_np = target_q.cpu().transpose(0, 1).numpy() # (K, N)
else: # discrete action spaces
# sample da actions per state
# Because of discrete action space, we can cover the all actions per state.
actions = torch.arange(da)[..., None].expand(da, K).to(self.device) # (da, K)
b_p = self.target_actor.forward(state_batch) # (K, da)
b = Categorical(probs=b_p) # (K,)
b_prob = b.expand((da, K)).log_prob(actions).exp() # (da, K)
expanded_actions = self.A_eye[None, ...].expand(K, -1, -1) # (K, da, da)
expanded_states = state_batch.reshape(K, 1, ds).expand((K, da, ds)) # (K, da, ds)
target_q = (
self.target_critic.forward(
expanded_states.reshape(-1, ds), # (K * da, ds)
expanded_actions.reshape(-1, da) # (K * da, da)
).reshape(K, da) # (K, da)
).transpose(0, 1) # (da, K)
b_prob_np = b_prob.cpu().transpose(0, 1).numpy() # (K, da)
target_q_np = target_q.cpu().transpose(0, 1).numpy() # (K, da)
# https://arxiv.org/pdf/1812.02256.pdf
# [2] 4.1 Finding action weights (Step 2)
# Using an exponential transformation of the Q-values
if self.continuous_action_space:
def dual(η):
"""
dual function of the non-parametric variational
Q = target_q_np (K, N)
g(η) = η*ε + η*mean(log(mean(exp(Q(s, a)/η), along=a)), along=s)
For numerical stabilization, this can be modified to
Qj = max(Q(s, a), along=a)
g(η) = η*ε + mean(Qj, along=j) + η*mean(log(mean(exp((Q(s, a)-Qj)/η), along=a)), along=s)
"""
max_q = np.max(target_q_np, 1)
return η * self.ε_dual + np.mean(max_q) \
+ η * np.mean(np.log(np.mean(np.exp((target_q_np - max_q[:, None]) / η), axis=1)))
else: # discrete action space
def dual(η):
"""
dual function of the non-parametric variational
g(η) = η*ε + η*mean(log(sum(π(a|s)*exp(Q(s, a)/η))))
We have to multiply π by exp because this is expectation.
This equation is correspond to last equation of the [2] p.15
For numerical stabilization, this can be modified to
Qj = max(Q(s, a), along=a)
g(η) = η*ε + mean(Qj, along=j) + | |
return self.audio
def save(self, basepath_noextension : str, acceptable_formats : typing.List[VNAudioFormat] = []) -> str:
    """Save the audio to the specified path; return the final path with extension.

    acceptable_formats is the list of formats that we can directly save; if
    it is empty, all formats are considered acceptable.
    (The [] default is shared across calls but is never mutated here.)
    """
    # ensure there is no extension
    basepath_noextension = os.path.splitext(basepath_noextension)[0]
    if len(acceptable_formats) == 0 or self.audio_format in acceptable_formats:
        # stored format is acceptable: write the raw bytes without conversion
        extension = VNAudioFormat.to_string(self.audio_format)
        path = basepath_noextension + "." + extension
        if self.audio_data is None:
            # lazily materialize the raw bytes from the decoded audio
            assert self.audio is not None
            buffer = io.BytesIO()
            self.audio.export(buffer, format = extension)
            # Fix: use getvalue() instead of read() -- it returns the full
            # payload regardless of where export() left the stream position.
            self.audio_data = buffer.getvalue()
        assert self.audio_data is not None
        tmppath = path + ".tmp"
        # make sure parent directory exists
        os.makedirs(pathlib.Path(tmppath).parent, exist_ok=True)
        with open(tmppath, "wb") as f:
            f.write(self.audio_data)
        # expose the file only once it is fully written
        os.rename(tmppath, path)
    else:
        # conversion needed: decode (if necessary) and export to the first
        # acceptable format
        chosen_format = VNAudioFormat.to_string(acceptable_formats[0])
        if self.audio is None:
            assert self.audio_data is not None
            self.audio = pydub.AudioSegment.from_file(io.BytesIO(self.audio_data))
        path = basepath_noextension + "." + chosen_format
        tmppath = path + ".tmp"
        os.makedirs(pathlib.Path(tmppath).parent, exist_ok=True)
        # Fix: export() opens and writes tmppath itself; the original wrapped
        # this call in an unused open(tmppath, "wb") that truncated the file a
        # second time and never wrote through the handle.
        self.audio.export(tmppath, format = chosen_format)
        os.rename(tmppath, path)
    # done
    return path
### classes for "code"
# We use the same model of "Function -> BasicBlock -> Instruction" from LLVM, now with different semantics:
# "Function" is still called "Function". It is the smallest unit for code reuse and "major" control flow transfer. Nothing below Section has globally visible name.
# "BasicBlock" is still called "BasicBlock". It is the smallest unit for control flow transfer. BasicBlocks end with a terminator instruction, like LLVM.
# "Instruction" is basically the same as instructions from LLVM.
# besides the concepts borrowed from LLVM, there are additional constructs unique to VN:
# Exclusive Contexts (e.g., BGM, Background, etc) are updated with UpdateContext (base class) Instructions; logically treated as writing to global variables
# Non-exclusive elements (e.g., sprite, foreground item, etc) are created with CreateElement and modified with ModifyElement (base class) instructions, and destroyed with DeleteElement instruction.
# Text wait/continue/sprite highlighting ... are implemented as attributes on text instruction
class VNInstruction(VNValue):
    """Base class for all instructions in VNModel."""
    def __init__(self, ty : VNValueType) -> None:
        # make sure it has the same signature as VNValue.__init__ to avoid
        # headache when using multiple inheritance
        super().__init__(ty)
    def is_terminator(self) -> bool:
        # overridden to return True by terminators (return/branch/exit/...)
        return False
class VNReturnInst(VNInstruction):
    """Return from the current function; a terminator producing no value."""
    def __init__(self) -> None:
        super().__init__(VNValueType.Void)
    def is_terminator(self) -> bool:
        return True
class VNCallInst(VNInstruction):
    """Call a function. We does not support function parameters.
    All contexts (foreground / background image, sprites, etc) in the current function context becomes undefined afterwards
    If later code requires use of the context, there will be updateXXX instructions to recover them
    """
    callee_name : str = ""   # name of the function being called
    callee_ref : object      # VNFunction; bound later via set_callee()

    def __init__(self, callee_name : str) -> None:
        super().__init__(VNValueType.Function)
        self.callee_name = callee_name
        self.callee_ref = None

    def get_callee(self):
        return self.callee_ref

    def get_callee_name(self):
        return self.callee_name

    def set_callee(self, callee_ref : object) -> None:
        """Bind the callee ref to a function. Should be called during construction of the VNModel"""
        # the bound function must carry the name this call site expects
        assert callee_ref.get_name() == self.callee_name
        self.callee_ref = callee_ref
class VNTailCall(VNAttribute):
    """If this attribute is present on a call, the subsequent instruction must be a return.
    This is a hint that no state saving (for sprites, etc) is required for a call.
    """
    pass
class VNDestroy(VNAttribute):
    """Marks an instruction that destroyed a context state without explicitly
    referencing it in its operands (probably a call)."""
    pass
class VNUnreachableInst(VNInstruction):
    """Marks code that should never execute. Warn the user if any of the following happens:
    1. An unreachable instruction is actually statically reachable
    2. A basic block without an unreachable terminator is found statically unreachable
    """
    def __init__(self) -> None:
        super().__init__(VNValueType.Void)
    def is_terminator(self) -> bool:
        return True
class VNExitInst(VNInstruction):
    """Exit this model, possibly with an exit code (default to 0)."""
    # exit_code may be left as None, meaning the default exit code of 0
    exit_code : VNValue = None
    def __init__(self, exit_code : VNValue = None) -> None:
        super().__init__(VNValueType.Integer)
        self.exit_code = exit_code
    def is_terminator(self) -> bool:
        return True
class VNBranchInst(VNInstruction):
    """Conditional and unconditional branch. branch destinations must be basic blocks inside the same function"""
    condition : VNValue = None
    true_successor : object = None   # VNBasicBlock
    false_successor : object = None  # VNBasicBlock

    def __init__(self, *, unconditional_successor = None, condition = None, true_successor = None, false_successor = None) -> None:
        """Please specify one of the set of arguments:
        For an unconditional branch:
            unconditional_successor: the successor basic block
        For a conditional branch:
            condition: the branch condition
            true_successor: the successor basic block if condition is true
            false_successor: the successor basic block if condition is false
        """
        super().__init__(VNValueType.Void)
        if unconditional_successor is None:
            # conditional form: all three conditional arguments are required
            assert condition is not None
            assert true_successor is not None
            assert false_successor is not None
            self.condition = condition
            self.true_successor = true_successor
            self.false_successor = false_successor
        else:
            # unconditional form: both edges point at the single successor
            assert condition is None
            assert true_successor is None
            assert false_successor is None
            self.condition = None
            self.true_successor = unconditional_successor
            self.false_successor = unconditional_successor

    def is_terminator(self) -> bool:
        return True
class VNSayerDeclInstr(VNInstruction):
    """Declares a new sayer, most importantly it can create the sprite
    If we want to (1) have a character sprite, or (2) use more than default state for a sayer,
    then we need to use sayer decl and sayer update
    If we have a character sprite, we disable the side image by default, otherwise the side image will be used if available. To override this, add an VNSayerSideImageUse attribute
    """
    sayer_base : VNSayerInfo = None  # the constant sayer info
    current_state : str = ""

    def __init__(self, sayer_base : VNSayerInfo) -> None:
        super().__init__(VNValueType.Void)
        self.sayer_base = sayer_base
        # Fix: the original assigned to a local 'current_state' (missing
        # 'self.'), leaving self.current_state as "" instead of the sayer's
        # default state.
        self.current_state = sayer_base.get_default_state()

    def get_sayer_info(self):
        return self.sayer_base

    def set_sayer_state(self, state : str) -> None:
        self.current_state = state

    def get_sayer_state(self) -> str :
        return self.current_state
class VNSayerSideImageUse(VNAttribute):
    """Specify whether the side image should be used, if available."""
    attr_name : typing.ClassVar[str] = "SideImageUse"
    enable : bool = True
    def __init__(self, enable : bool = True):
        # NOTE(review): does not call super().__init__(); presumably
        # VNAttribute requires no initialization -- confirm against its
        # definition (not visible in this chunk).
        self.enable = enable
    def get_enabled(self) -> bool:
        return self.enable
class VNSayerUpdateInstr(VNInstruction):
    """Modify an existing sayer, including hiding the sprite / "destructing" the sayer"""
    sayer_base : VNSayerInfo = None  # the constant sayer info
    sayer_last : object = None       # previous VNSayerDeclInstr / VNSayerUpdateInstr in the chain
    current_state : str = ""

    def __init__(self, prev) -> None:
        super().__init__(VNValueType.Void)
        # inherit the sayer info and state from the previous decl/update
        self.sayer_last = prev
        self.sayer_base = prev.get_sayer_info()
        self.current_state = prev.get_sayer_state()

    def get_prev(self):
        return self.sayer_last

    def get_sayer_info(self) -> VNSayerInfo:
        return self.sayer_base

    def get_sayer_state(self) -> str :
        return self.current_state

    def set_sayer_state(self, state : str) -> None:
        self.current_state = state
class VNSayInst(VNInstruction):
    """Display text instruction
    Each instruction can have:
        the text to say
        the sayer (can be None)
    The return value of the text instruction is the text being displayed
    The state of the sayer (expressions like sad, happy, etc) are on the sayer instead of text
    Whether we wait/continue/... is implemented in attributes:
        [wait]: wait for click after text rolled over
        [continue(Text)]: continue from the text in the specified text source (e.g., a previous VNSayInst)
    """
    sayer : VNValue
    text : VNTextBlock
    voice : VNValue

    def __init__(self, sayer : VNValue, text : VNTextBlock, voice : VNValue = None) -> None:
        super().__init__(VNValueType.Text)
        self.sayer = sayer
        self.text = text
        self.voice = voice

    def get_voice(self) -> VNValue:
        return self.voice

    def get_text(self) -> VNTextBlock:
        return self.text

    def get_sayer(self) -> VNValue:
        return self.sayer
class VNUpdateContext(VNInstruction):
    """Base class for updating any exclusive context (BGM, background, ...);
    logically treated as writing to a global variable."""
    def __init__(self):
        super().__init__(VNValueType.Void)
class VNUpdateBackground(VNUpdateContext):
    """Update background image (or maybe a Screen)"""
    background : VNValue = None

    def __init__(self, background : VNValue):
        super().__init__()
        # only image-like values may serve as the background
        assert background.get_type() in (VNValueType.Image, VNValueType.Screen)
        self.background = background

    def get_background(self):
        return self.background
class VNUpdateBGMInstr(VNUpdateContext):
"""Update the background music (can be a list for circulation)
When without any attributes, we default to "loop all"
Later on we may support attributes to set it to random loop, etc
"""
bgm_list : typing.List[VNValue] = []
def __init__(self, bgm):
super().__init__()
if isinstance(bgm, list):
self.bgm_list = bgm
else:
| |
<gh_stars>0
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto.cloudformation as cfn
import fixtures
import json
from mox3 import mox
import os
import subprocess
import tempfile
import testtools
import testtools.matchers as ttm
from heat_cfntools.cfntools import cfn_helper
class FakePOpen(object):
    """Minimal stand-in for subprocess.Popen used by the mox expectations."""

    def __init__(self, stdout='', stderr='', returncode=0):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

    def communicate(self):
        """Return the canned (stdout, stderr) pair."""
        return self.stdout, self.stderr

    def wait(self):
        """No-op: the fake process has already 'finished'."""
        pass
class MockPopenTestCase(testtools.TestCase):
    """Base test case that stubs out subprocess.Popen via mox.

    Expectations are recorded with mock_cmd_run / mock_unorder_cmd_run
    during mox's record phase; real calls are matched once ReplayAll()
    is active.
    """
    def mock_cmd_run(self, command, cwd=None, env=None):
        # Record an ordered expectation: 'command' run with piped
        # stdout/stderr (-1 == subprocess.PIPE).
        return subprocess.Popen(
            command, cwd=cwd, env=env, stderr=-1, stdout=-1)
    def mock_unorder_cmd_run(self, command, cwd=None, env=None):
        # Same, but InAnyOrder() lets mox match this expectation
        # regardless of call order.
        return subprocess.Popen(
            command, cwd=cwd, env=env, stderr=-1, stdout=-1).InAnyOrder()
    def setUp(self):
        super(MockPopenTestCase, self).setUp()
        self.m = mox.Mox()
        # replace subprocess.Popen with a mox mock for the whole test
        self.m.StubOutWithMock(subprocess, 'Popen')
        self.addCleanup(self.m.UnsetStubs)
class TestCommandRunner(MockPopenTestCase):
    def test_command_runner(self):
        """Chained CommandRunners run in order and capture stdout/stderr."""
        self.mock_cmd_run(['su', 'root', '-c', '/bin/command1']).AndReturn(
            FakePOpen('All good'))
        self.mock_cmd_run(['su', 'root', '-c', '/bin/command2']).AndReturn(
            FakePOpen('Doing something', 'error', -1))
        self.m.ReplayAll()
        # cmd1 is constructed with cmd2 as its follow-up command, so running
        # cmd1 as root also runs cmd2.
        cmd2 = cfn_helper.CommandRunner('/bin/command2')
        cmd1 = cfn_helper.CommandRunner('/bin/command1', cmd2)
        cmd1.run('root')
        self.assertEqual(
            'CommandRunner:\n\tcommand: /bin/command1\n\tstdout: All good',
            str(cmd1))
        # a non-zero status also surfaces stderr in the string form
        self.assertEqual(
            'CommandRunner:\n\tcommand: /bin/command2\n\tstatus: -1\n'
            '\tstdout: Doing something\n\tstderr: error',
            str(cmd2))
        self.m.VerifyAll()
class TestPackages(MockPopenTestCase):
    """Tests for cfn_helper.PackagesHandler across yum, zypper and apt."""
    # Fix: removed the dead local 'install_list' that the yum and zypper
    # tests built up but never used.

    def test_yum_install(self):
        """Missing packages are probed via rpm/yum, then installed."""
        for pack in ('httpd', 'wordpress', 'mysql-server'):
            # rpm -q reports the package as not installed (rc 1), then yum
            # lists it as available (rc 0).
            self.mock_unorder_cmd_run(
                ['su', 'root', '-c', 'rpm -q %s' % pack]) \
                .AndReturn(FakePOpen(returncode=1))
            self.mock_unorder_cmd_run(
                ['su', 'root', '-c',
                 'yum -y --showduplicates list available %s' % pack]) \
                .AndReturn(FakePOpen(returncode=0))
        # This mock call corresponding to 'su root -c yum -y install .*'
        # But there is no way to ignore the order of the parameters, so only
        # check the return value.
        self.mock_cmd_run(mox.IgnoreArg()).AndReturn(FakePOpen(
            returncode=0))
        self.m.ReplayAll()

        packages = {
            "yum": {
                "mysql-server": [],
                "httpd": [],
                "wordpress": []
            }
        }
        cfn_helper.PackagesHandler(packages).apply_packages()
        self.m.VerifyAll()

    def test_zypper_install(self):
        """Missing packages are probed via rpm/zypper, then installed."""
        for pack in ('httpd', 'wordpress', 'mysql-server'):
            self.mock_unorder_cmd_run(
                ['su', 'root', '-c', 'rpm -q %s' % pack]) \
                .AndReturn(FakePOpen(returncode=1))
            self.mock_unorder_cmd_run(
                ['su', 'root', '-c',
                 'zypper -n --no-refresh search %s' % pack]) \
                .AndReturn(FakePOpen(returncode=0))
        # This mock call corresponding to 'su root -c zypper -n install .*'
        # But there is no way to ignore the order of the parameters, so only
        # check the return value.
        self.mock_cmd_run(mox.IgnoreArg()).AndReturn(FakePOpen(
            returncode=0))
        self.m.ReplayAll()

        packages = {
            "zypper": {
                "mysql-server": [],
                "httpd": [],
                "wordpress": []
            }
        }
        cfn_helper.PackagesHandler(packages).apply_packages()
        self.m.VerifyAll()

    def test_apt_install(self):
        """apt installs everything in a single command with no probes."""
        # This mock call corresponding to
        # 'DEBIAN_FRONTEND=noninteractive su root -c apt-get -y install .*'
        # But there is no way to ignore the order of the parameters, so only
        # check the return value.
        self.mock_cmd_run(mox.IgnoreArg()).AndReturn(FakePOpen(
            returncode=0))
        self.m.ReplayAll()

        packages = {
            "apt": {
                "mysql-server": [],
                "httpd": [],
                "wordpress": []
            }
        }
        cfn_helper.PackagesHandler(packages).apply_packages()
        self.m.VerifyAll()
class TestServicesHandler(MockPopenTestCase):
def test_services_handler_systemd(self):
    """systemd backend: enabled services are enabled and started, restarted
    by monitor_services() when found dead, and left alone when running."""
    self.m.StubOutWithMock(os.path, 'exists')
    # presence of /bin/systemctl selects the systemd code path
    os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(True)
    # apply_services: each service is enabled, probed (not running, rc -1),
    # then started
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl enable httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status httpd.service']
    ).AndReturn(FakePOpen(returncode=-1))
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl start httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl enable mysqld.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status mysqld.service']
    ).AndReturn(FakePOpen(returncode=-1))
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl start mysqld.service']
    ).AndReturn(FakePOpen())
    # monitor_services not running: each dead service is restarted and the
    # service.restarted hook fires afterwards
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status httpd.service']
    ).AndReturn(FakePOpen(returncode=-1))
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl start httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/services_restarted']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status mysqld.service']
    ).AndReturn(FakePOpen(returncode=-1))
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl start mysqld.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/services_restarted']
    ).AndReturn(FakePOpen())
    # monitor_services running: status probes succeed, so nothing else runs
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status mysqld.service']
    ).AndReturn(FakePOpen())
    self.m.ReplayAll()

    services = {
        "systemd": {
            "mysqld": {"enabled": "true", "ensureRunning": "true"},
            "httpd": {"enabled": "true", "ensureRunning": "true"}
        }
    }
    hooks = [
        cfn_helper.Hook(
            'hook1',
            'service.restarted',
            'Resources.resource1.Metadata',
            'root',
            '/bin/services_restarted')
    ]
    sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
    sh.apply_services()
    # services not running
    sh.monitor_services()
    # services running
    sh.monitor_services()

    self.m.VerifyAll()
def test_services_handler_systemd_disabled(self):
    """systemd backend: disabled services are disabled and, because their
    status probe reports them running, stopped."""
    self.m.StubOutWithMock(os.path, 'exists')
    # presence of /bin/systemctl selects the systemd code path
    os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(True)
    # apply_services: disable each service, then stop it because the status
    # probe reports it running (rc 0)
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl disable httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl stop httpd.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl disable mysqld.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl status mysqld.service']
    ).AndReturn(FakePOpen())
    self.mock_unorder_cmd_run(
        ['su', 'root', '-c', '/bin/systemctl stop mysqld.service']
    ).AndReturn(FakePOpen())
    self.m.ReplayAll()

    services = {
        "systemd": {
            "mysqld": {"enabled": "false", "ensureRunning": "false"},
            "httpd": {"enabled": "false", "ensureRunning": "false"}
        }
    }
    hooks = [
        cfn_helper.Hook(
            'hook1',
            'service.restarted',
            'Resources.resource1.Metadata',
            'root',
            '/bin/services_restarted')
    ]
    sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
    sh.apply_services()

    self.m.VerifyAll()
    def test_services_handler_sysv_service_chkconfig(self):
        """SysV service managed through /sbin/service + /sbin/chkconfig.

        Verifies that apply_services enables the service via chkconfig and
        starts it when the status probe fails, and that monitor_services
        restarts the service (triggering the service.restarted hook) only
        while the status probe reports it down.
        """
        self.m.StubOutWithMock(os.path, 'exists')
        # No systemctl present -> forces the service/chkconfig code path.
        os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(False)
        os.path.exists('/sbin/service').MultipleTimes().AndReturn(True)
        os.path.exists('/sbin/chkconfig').MultipleTimes().AndReturn(True)
        # apply_services
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/chkconfig httpd on']
        ).AndReturn(FakePOpen())
        # A non-zero returncode from `status` means "not running", so a
        # `start` is expected to follow.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/service httpd status']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/service httpd start']
        ).AndReturn(FakePOpen())
        # monitor_services not running: restart and fire the hook command.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/service httpd status']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/service httpd start']
        ).AndReturn(FakePOpen())
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/services_restarted']
        ).AndReturn(FakePOpen())
        # monitor_services running: status succeeds, no further commands.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/sbin/service httpd status']
        ).AndReturn(FakePOpen())
        self.m.ReplayAll()

        services = {
            "sysvinit": {
                "httpd": {"enabled": "true", "ensureRunning": "true"}
            }
        }
        hooks = [
            cfn_helper.Hook(
                'hook1',
                'service.restarted',
                'Resources.resource1.Metadata',
                'root',
                '/bin/services_restarted')
        ]
        sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
        sh.apply_services()
        # services not running
        sh.monitor_services()
        # services running
        sh.monitor_services()
        self.m.VerifyAll()
def test_services_handler_sysv_disabled_service_chkconfig(self):
self.m.StubOutWithMock(os.path, 'exists')
os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(False)
os.path.exists('/sbin/service').MultipleTimes().AndReturn(True)
os.path.exists('/sbin/chkconfig').MultipleTimes().AndReturn(True)
# apply_services
self.mock_cmd_run(
['su', 'root', '-c', '/sbin/chkconfig httpd off']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/sbin/service httpd status']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/sbin/service httpd stop']
).AndReturn(FakePOpen())
self.m.ReplayAll()
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
self.m.VerifyAll()
    def test_services_handler_sysv_systemctl(self):
        """SysV-declared service managed via systemctl when it is available.

        Even though the service is declared under "sysvinit", the handler
        uses /bin/systemctl when it exists. Covers enable+start during
        apply_services, restart+hook while down, and no-op while running.
        """
        self.m.StubOutWithMock(os.path, 'exists')
        os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(True)
        # apply_services
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl enable httpd.service']
        ).AndReturn(FakePOpen())
        # Non-zero status returncode means "not running", so a start follows.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl status httpd.service']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl start httpd.service']
        ).AndReturn(FakePOpen())
        # monitor_services not running: restart and fire the hook command.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl status httpd.service']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl start httpd.service']
        ).AndReturn(FakePOpen())
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/services_restarted']
        ).AndReturn(FakePOpen())
        # monitor_services running: status succeeds, nothing else happens.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/systemctl status httpd.service']
        ).AndReturn(FakePOpen())
        self.m.ReplayAll()

        services = {
            "sysvinit": {
                "httpd": {"enabled": "true", "ensureRunning": "true"}
            }
        }
        hooks = [
            cfn_helper.Hook(
                'hook1',
                'service.restarted',
                'Resources.resource1.Metadata',
                'root',
                '/bin/services_restarted')
        ]
        sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
        sh.apply_services()
        # services not running
        sh.monitor_services()
        # services running
        sh.monitor_services()
        self.m.VerifyAll()
def test_services_handler_sysv_disabled_systemctl(self):
self.m.StubOutWithMock(os.path, 'exists')
os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(True)
# apply_services
self.mock_cmd_run(
['su', 'root', '-c', '/bin/systemctl disable httpd.service']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/bin/systemctl status httpd.service']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/bin/systemctl stop httpd.service']
).AndReturn(FakePOpen())
self.m.ReplayAll()
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
self.m.VerifyAll()
    def test_services_handler_sysv_service_updaterc(self):
        """SysV service via update-rc.d when neither systemctl, /sbin/service
        nor chkconfig exist (Debian-style layout under /usr/sbin).

        Covers enable+start during apply_services, restart+hook while the
        service is down, and no further commands while it is running.
        """
        self.m.StubOutWithMock(os.path, 'exists')
        # None of the RedHat-style tools exist -> update-rc.d fallback.
        os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(False)
        os.path.exists('/sbin/service').MultipleTimes().AndReturn(False)
        os.path.exists('/sbin/chkconfig').MultipleTimes().AndReturn(False)
        # apply_services
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/update-rc.d httpd enable']
        ).AndReturn(FakePOpen())
        # Non-zero status returncode means "not running", so a start follows.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/service httpd status']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/service httpd start']
        ).AndReturn(FakePOpen())
        # monitor_services not running: restart and fire the hook command.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/service httpd status']
        ).AndReturn(FakePOpen(returncode=-1))
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/service httpd start']
        ).AndReturn(FakePOpen())
        self.mock_cmd_run(
            ['su', 'root', '-c', '/bin/services_restarted']
        ).AndReturn(FakePOpen())
        # monitor_services running: status succeeds, nothing else happens.
        self.mock_cmd_run(
            ['su', 'root', '-c', '/usr/sbin/service httpd status']
        ).AndReturn(FakePOpen())
        self.m.ReplayAll()

        services = {
            "sysvinit": {
                "httpd": {"enabled": "true", "ensureRunning": "true"}
            }
        }
        hooks = [
            cfn_helper.Hook(
                'hook1',
                'service.restarted',
                'Resources.resource1.Metadata',
                'root',
                '/bin/services_restarted')
        ]
        sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
        sh.apply_services()
        # services not running
        sh.monitor_services()
        # services running
        sh.monitor_services()
        self.m.VerifyAll()
def test_services_handler_sysv_disabled_service_updaterc(self):
self.m.StubOutWithMock(os.path, 'exists')
os.path.exists('/bin/systemctl').MultipleTimes().AndReturn(False)
os.path.exists('/sbin/service').MultipleTimes().AndReturn(False)
os.path.exists('/sbin/chkconfig').MultipleTimes().AndReturn(False)
# apply_services
self.mock_cmd_run(
['su', 'root', '-c', '/usr/sbin/update-rc.d httpd disable']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/usr/sbin/service httpd status']
).AndReturn(FakePOpen())
self.mock_cmd_run(
['su', 'root', '-c', '/usr/sbin/service httpd stop']
).AndReturn(FakePOpen())
self.m.ReplayAll()
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
self.m.VerifyAll()
class TestHupConfig(MockPopenTestCase):
    def test_load_main_section(self):
        """HupConfig parses [main] and validates the credentials file.

        First a valid config: defaults (region=nova, interval=10) are applied
        and the string form is checked. Then a config pointing at a
        non-existent credentials file must raise with a clear message.
        """
        # NOTE(review): NamedTemporaryFile defaults to binary mode; writing
        # str here assumes Python 2 semantics -- TODO confirm under Python 3.
        fcreds = tempfile.NamedTemporaryFile()
        fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n')
        fcreds.flush()

        main_conf = tempfile.NamedTemporaryFile()
        main_conf.write('''[main]
stack=teststack
credential-file=%s''' % fcreds.name)
        main_conf.flush()
        mainconfig = cfn_helper.HupConfig([open(main_conf.name)])

        # region and interval were not configured, so the defaults show up.
        self.assertEqual(
            '{stack: teststack, credential_file: %s, '
            'region: nova, interval:10}' % fcreds.name,
            str(mainconfig))
        main_conf.close()

        main_conf = tempfile.NamedTemporaryFile()
        main_conf.write('''[main]
stack=teststack
region=region1
credential-file=%s-invalid
interval=120''' % fcreds.name)
        main_conf.flush()

        # The "-invalid" suffix makes the credential path non-existent.
        e = self.assertRaises(Exception, cfn_helper.HupConfig,
                              [open(main_conf.name)])
        self.assertIn('invalid credentials file', str(e))
        fcreds.close()
def test_hup_config(self):
self.mock_cmd_run(
['su', 'root', '-c', '/bin/cfn-http-restarted']).AndReturn(
FakePOpen('All good'))
self.mock_cmd_run(['su', 'root', '-c', '/bin/hook1']).AndReturn(
FakePOpen('All good'))
self.mock_cmd_run(['su', 'root', '-c', '/bin/hook2']).AndReturn(
FakePOpen('All good'))
self.mock_cmd_run(['su', 'root', '-c', '/bin/hook3']).AndReturn(
FakePOpen('All | |
m.fp_text:
if t[0] == 'reference':
ref = t[1]
break;
m_at,m_angle = getAt(m.at)
pads = []
count += len(m.pad)
for j,p in enumerate(m.pad):
layers = [unquote(s) for s in p.layers]
if self.layer not in layers \
and layer_match not in layers \
and '*' not in layers:
skip_count+=1
continue
if self.filterNets(p):
skip_count+=1
continue
shape = p[2]
try:
make_shape = globals()['make_{}'.format(shape)]
except KeyError:
raise NotImplementedError(
'pad shape {} not implemented\n'.format(shape))
w = make_shape(Vector(*p.size),p)
at,angle = getAt(p.at)
angle -= m_angle;
if not isZero(angle):
w.rotate(Vector(),Vector(0,0,1),angle)
w.translate(at)
if not self.merge_pads:
pads.append(func(w,'pad',
'{}#{}#{}#{}#{}'.format(i,j,p[0],ref,self.netName(p))))
else:
pads.append(w)
if not pads:
continue
if not self.merge_pads:
obj = self._makeCompound(pads,'pads','{}#{}'.format(i,ref))
else:
obj = func(pads,'pads','{}#{}'.format(i,ref))
self._place(obj,m_at,m_angle)
objs.append(obj)
via_skip = 0
vias = []
for i,v in enumerate(self.pcb.via):
layers = [unquote(s) for s in v.layers]
if self.layer not in layers or self.filterNets(v):
via_skip += 1
continue
w = make_circle(Vector(v.size))
w.translate(makeVect(v.at))
if not self.merge_vias:
vias.append(func(w,'via','{}#{}'.format(i,v.size)))
else:
vias.append(w)
if vias:
if self.merge_vias:
objs.append(func(vias,'vias'))
else:
objs.append(self._makeCompound(vias,'vias'))
self._log('modules: {}',len(self.pcb.module))
self._log('pads: {}, skipped: {}',count,skip_count)
self._log('vias: {}, skipped: {}',len(self.pcb.via),via_skip)
self._log('total pads added: {}',
count-skip_count+len(self.pcb.via)-via_skip)
if objs:
objs = self._cutHoles(objs,holes,'pads',fit_arcs=fit_arcs)
if shape_type=='solid':
objs = self._makeSolid(objs,'pads', thickness,
fit_arcs = fit_arcs)
else:
objs = self._makeCompound(objs,'pads',
fuse=True,fit_arcs=fit_arcs)
self.setColor(objs,'pad')
self._popLog('pads done')
fitView();
return objs
def setColor(self,obj,otype):
if not self.add_feature:
return
try:
color = self.colors[otype][self.layer_type]
except KeyError:
color = self.colors[otype][0]
if hasattr(obj.ViewObject,'MapFaceColor'):
obj.ViewObject.MapFaceColor = False
obj.ViewObject.ShapeColor = color
    def makeTracks(self,shape_type='face',fit_arcs=True,
            thickness=0.05,holes=False,prefix=''):
        """Build the track (segment) geometry of the current layer.

        Args:
            shape_type: 'line', 'wire', 'face' or 'solid'.
            fit_arcs: try to refit line segments into arcs on merge.
            thickness: extrusion thickness used for shape_type=='solid'.
            holes: whether (and what) to cut drill holes from the result.
            prefix: log prefix passed to _pushLog.

        Returns the created object(s), or an empty list if the layer has
        no tracks.
        """
        self._pushLog('making tracks...',prefix=prefix)

        # NOTE: _wire closes over `width`, which the grouping loop below
        # rebinds per track group -- the late binding is intentional so the
        # offset always matches the group being built.
        width = 0
        def _line(edges,label,offset=0,fill=False):
            wires = findWires(edges)
            return self._makeWires(wires,'track', offset=offset,
                    fill=fill, label=label, workplane=True)

        def _wire(edges,label,fill=False):
            # offset by half the track width so the outline hugs the copper
            return _line(edges,label,width*0.5,fill)

        def _face(edges,label):
            return _wire(edges,label,True)

        _solid = _face

        # Dispatch on shape_type via the local helpers defined above.
        try:
            func = locals()['_{}'.format(shape_type)]
        except KeyError:
            raise ValueError('invalid shape type: {}'.format(shape_type))

        # Group segments by net name (or '' when merging) and then by width.
        tracks = defaultdict(lambda: defaultdict(list))
        count = 0
        for s in self.pcb.segment:
            if self.filterNets(s):
                continue
            if unquote(s.layer) == self.layer:
                if self.merge_tracks:
                    tracks[''][s.width].append(s)
                else:
                    tracks[self.netName(s)][s.width].append(s)
                count += 1

        objs = []
        i = 0
        for (name,sss) in iteritems(tracks):
            for (width,ss) in iteritems(sss):
                self._log('making {} tracks {} of width {:.2f}, ({}/{})',
                        len(ss),name,width,i,count)
                i+=len(ss)
                edges = []
                for s in ss:
                    if s.start != s.end:
                        edges.append(Part.makeLine(
                            makeVect(s.start),makeVect(s.end)))
                    else:
                        # zero-length segments cannot form an edge; warn only
                        self._log('Line (Track) through identical points {}',
                                s.start, level="warning")
                if self.merge_tracks:
                    label = '{}'.format(width)
                else:
                    label = '{}#{}'.format(width,name)
                objs.append(func(edges,label=label))

        if objs:
            objs = self._cutHoles(objs,holes,'tracks',fit_arcs=fit_arcs)
            if shape_type == 'solid':
                objs = self._makeSolid(objs,'tracks',thickness,
                                        fit_arcs=fit_arcs)
            else:
                objs = self._makeCompound(objs,'tracks',fuse=True,
                                        fit_arcs=fit_arcs)
            self.setColor(objs,'track')

        self._popLog('tracks done')
        fitView();
        return objs
    def makeZones(self,shape_type='face',thickness=0.05, fit_arcs=True,
            holes=False,prefix=''):
        """Build the filled copper zone geometry of the current layer.

        Args:
            shape_type: 'wire', 'face' or 'solid'.
            thickness: extrusion thickness used for shape_type=='solid'.
            fit_arcs: try to refit line segments into arcs on merge.
            holes: whether (and what) to cut drill holes from the result.
            prefix: log prefix passed to _pushLog.

        Returns the created object(s), or an empty list when no zone
        matches the current layer/net filters.
        """
        self._pushLog('making zones...',prefix=prefix)

        # `z` and `zone_holes` are rebound by the loop below; the local
        # helpers close over them deliberately.
        z = None
        zone_holes = []

        def _wire(obj,fill=False):
            # NOTE: It is weird that kicad_pcb's zone fillpolygon is 0.127mm
            # thinner than the actual copper region shown in pcbnew or the
            # generated gerber. Why is this so? Is this 0.127 hardcoded or
            # related to some setup parameter? I am guessing this is half the
            # zone.min_thickness setting here.
            if not zone_holes or (
                self.add_feature and self.make_sketch and self.zone_merge_holes):
                obj = [obj]+zone_holes
            elif zone_holes:
                obj = (self._makeWires(obj,'zone_outline', label=z.net_name),
                        self._makeWires(zone_holes,'zone_hole',label=z.net_name))
                return self._makeArea(obj,'zone',offset=z.min_thickness*0.5,
                        op=1, fill=fill,label=z.net_name)
            return self._makeWires(obj,'zone',fill=fill,
                    offset=z.min_thickness*0.5,label=z.net_name)

        def _face(obj):
            return _wire(obj,True)

        _solid = _face

        # Dispatch on shape_type via the local helpers defined above.
        try:
            func = locals()['_{}'.format(shape_type)]
        except KeyError:
            raise ValueError('invalid shape type: {}'.format(shape_type))

        objs = []
        for z in self.pcb.zone:
            if unquote(z.layer) != self.layer or self.filterNets(z):
                continue
            count = len(z.filled_polygon)
            self._pushLog('making zone {}...', z.net_name)
            for idx,p in enumerate(z.filled_polygon):
                zone_holes = []
                table = {}
                pts = SexpList(p.pts.xy)
                # close the polygon
                pts._append(p.pts.xy._get(0))
                # `table` uses a pair of vertex as the key to store the index of
                # an edge.
                for i in range(len(pts)-1):
                    table[str((pts[i],pts[i+1]))] = i
                # This is how kicad represents holes in zone polygon
                #  ---------------------------
                #  |    -----      ----      |
                #  |    |   |======|  |      |
                #  |====|   |      |  |      |
                #  |    -----      ----      |
                #  |                         |
                #  ---------------------------
                # It uses a single polygon with coincide edges of oppsite
                # direction (shown with '=' above) to dig a hole. And one hole
                # can lead to another, and so forth. The following `build()`
                # function is used to recursively discover those holes, and
                # cancel out those '=' double edges, which will surely cause
                # problem if left alone. The algorithm assumes we start with a
                # point of the outer polygon.
                def build(start,end):
                    results = []
                    while start<end:
                        # We used the reverse edge as key to search for an
                        # identical edge of oppsite direction. NOTE: the
                        # algorithm only works if the following assumption is
                        # true, that those hole digging double edges are of
                        # equal length without any branch in the middle
                        key = str((pts[start+1],pts[start]))
                        try:
                            i = table[key]
                            del table[key]
                        except KeyError:
                            # `KeyError` means its a normal edge, add the line.
                            results.append(Part.makeLine(
                                makeVect(pts[start]),makeVect(pts[start+1])))
                            start += 1
                            continue
                        # We found the start of a double edge, treat all edges
                        # in between as holes and recurse. Both of the double
                        # edges are skipped.
                        h = build(start+1,i)
                        if h:
                            zone_holes.append(Part.Wire(h))
                        start = i+1
                    return results
                edges = build(0,len(pts)-1)
                self._log('region {}/{}, holes: {}',idx+1,count,len(zone_holes))
                objs.append(func(Part.Wire(edges)))
            self._popLog()

        if objs:
            objs = self._cutHoles(objs,holes,'zones')
            if shape_type == 'solid':
                objs = self._makeSolid(objs,'zones',thickness,fit_arcs=fit_arcs)
            else:
                objs = self._makeCompound(objs,'zones',
                        fuse=holes,fit_arcs=fit_arcs)
            self.setColor(objs,'zone')

        self._popLog('zones done')
        fitView();
        return objs
def isBottomLayer(self):
return self.layer_type == 31
    def makeCopper(self,shape_type='face',thickness=0.05,fit_arcs=True,
            holes=False, z=0, prefix='',fuse=False):
        """Build one copper layer by combining pads, tracks and zones.

        Args:
            shape_type: 'face' or 'solid'.
            thickness: copper thickness for the solid case; also used as the
                per-kind z offset between pads/tracks/zones.
            fit_arcs: try to refit line segments into arcs on merge.
            holes: whether (and what) to cut drill holes from the result.
            z: final z placement of the resulting object.
            prefix: log prefix passed to _pushLog.
            fuse: when True with shape_type=='solid', sub-shapes are built
                as faces first and extruded once at the end.

        Returns the combined object, or None when the layer is empty.
        """
        self._pushLog('making copper layer {}...',self.layer,prefix=prefix)

        holes = self._cutHoles(None,holes,None)
        objs = []
        if shape_type=='solid':
            solid = True
            sub_fit_arcs = fit_arcs
            if fuse:
                # build sub-shapes as faces; a single extrusion happens below
                shape_type = 'face'
        else:
            solid = False
            sub_fit_arcs = False
        # Delegate to makePads/makeTracks/makeZones; each kind gets its own
        # small z offset in the solid case so the shapes stack cleanly.
        for (name,offset) in (('Pads',thickness),
                            ('Tracks',0.5*thickness),
                            ('Zones',0)):
            obj = getattr(self,'make{}'.format(name))(fit_arcs=sub_fit_arcs,
                    holes=holes,shape_type=shape_type,prefix=None,
                    thickness=thickness)
            if not obj:
                continue
            if shape_type=='solid':
                # offset direction flips for layers on the bottom half
                ofs = offset if self.layer_type < 16 else -offset
                self._place(obj,Vector(0,0,ofs))
            objs.append(obj)
        if not objs:
            return

        if shape_type=='solid':
            self._log("making solid")
            obj = self._makeCompound(objs,'copper')
            self._log("done solid")
        else:
            obj = self._makeArea(objs,'copper',fit_arcs=fit_arcs)
        self.setColor(obj,'copper')
        if solid:
            self._log("making solid")
            obj = self._makeSolid(obj,'copper',thickness)
            self._log("done solid")
            self.setColor(obj,'copper')
        self._place(obj,Vector(0,0,z))
        self._popLog('done copper layer {}',self.layer)
        fitView();
        return obj
    def makeCoppers(self,shape_type='face',fit_arcs=True,prefix='',
            holes=False,board_thickness=None,thickness=0.05,fuse=False):
        """Build every copper layer of the board, stacked along z.

        Args:
            shape_type: 'face' or 'solid'.
            fit_arcs: try to refit line segments into arcs on merge.
            prefix: log prefix passed to _pushLog.
            holes: whether (and what) to cut drill holes from the result.
            board_thickness: overall board thickness; defaults to the value
                from the kicad_pcb `general` section.
            thickness: per-layer copper thickness.
            fuse: when True with shape_type=='solid', also model plated
                through-hole copper and fuse everything into one solid.

        Returns the created object(s), or None when no copper was produced.
        """
        self._pushLog('making all copper layers...',prefix=prefix)

        layer_save = self.layer
        objs = []
        layers = []
        # kicad_pcb copper layers use ids 0..31
        for i in range(0,32):
            if str(i) in self.pcb.layers:
                layers.append(i)
        if not layers:
            raise ValueError('no copper layer found')
        if not board_thickness:
            board_thickness = self.pcb.general.thickness
        z = board_thickness
        # distribute the layers evenly from top (z=board_thickness) downward
        if len(layers) == 1:
            z_step = 0
        else:
            z_step = (z+thickness)/(len(layers)-1)
        if not holes:
            hole_shapes = None
        elif fuse:
            # make only npth holes
            hole_shapes = self._cutHoles(None,holes,None,npth=1)
        else:
            hole_shapes = self._cutHoles(None,holes,None)
        # Always restore the previously selected layer, even on error.
        try:
            for layer in layers:
                self.setLayer(layer)
                copper = self.makeCopper(shape_type,thickness,fit_arcs=fit_arcs,
                        holes=hole_shapes,z=z,prefix=None,fuse=fuse)
                if copper:
                    objs.append(copper)
                z -= z_step
        finally:
            self.setLayer(layer_save)

        if not objs:
            self._popLog('no copper found')
            return

        if shape_type=='solid' and fuse:
            # make copper for plated through holes
            hole_coppers = self.makeHoles(shape_type='solid',prefix=None,
                    oval=True,npth=-1,thickness=board_thickness+thickness)
            if hole_coppers:
                self.setColor(hole_coppers,'copper')
                self._place(hole_coppers,FreeCAD.Vector(0,0,-thickness*0.5))
                objs.append(hole_coppers);
            # connect coppers with pad with plated through holes, and fuse
            objs = self._makeFuse(objs,'coppers')
            self.setColor(objs,'copper')
            if holes:
                # make plated through holes with inward offset
                drills = self.makeHoles(shape_type='solid',prefix=None,
                        thickness=board_thickness+6*thickness,
                        oval=True,npth=-1,offset=thickness)
                if drills:
                    self._place(drills,FreeCAD.Vector(0,0,-thickness*2))
                    objs = self._makeCut(objs,drills,'coppers')
                    self.setColor(objs,'copper')
        self._popLog('done making all copper layers')
        fitView();
        return objs
def loadParts(self,z=0,combo=False,prefix=''):
if not os.path.isdir(self.part_path):
raise Exception('cannot find kicad package3d directory')
self._pushLog('loading parts on layer {}...',self.layer,prefix=prefix)
self._log('Kicad package3d path: {}',self.part_path)
at_bottom = self.isBottomLayer()
if z == 0:
if at_bottom:
z = -0.1
else:
z = self.pcb.general.thickness + 0.1
if self.add_feature or combo:
parts = []
else:
parts = {}
for (module_idx,m) in enumerate(self.pcb.module):
if unquote(m.layer) != self.layer:
continue
ref = '?'
value = '?'
for t in m.fp_text:
if t[0] == 'reference':
ref = t[1]
if t[0] == 'value':
value = t[1]
m_at,m_angle = getAt(m.at)
m_at += Vector(0,0,z)
objs = []
for (model_idx,model) in enumerate(m.model):
path = os.path.splitext(model[0])[0]
self._log('loading model {}/{} {} {} {}...',
model_idx,len(m.model), ref,value,model[0])
for e in ('.stp','.STP','.step','.STEP'):
filename = os.path.join(self.part_path,path+e)
mobj = loadModel(filename)
if not mobj:
continue
at = product(Vector(*model.at.xyz),Vector(25.4,25.4,25.4))
rot = [-float(v) for v in reversed(model.rotate.xyz)]
pln = Placement(at,Rotation(*rot))
if not self.add_feature:
if combo:
obj = mobj[0].copy()
obj.Placement = pln
else:
obj = {'shape':mobj[0].copy(),'color':mobj[1]}
obj['shape'].Placement = pln
objs.append(obj)
else:
obj = self._makeObject('Part::Feature','model',
label='{}#{}#{}'.format(module_idx,model_idx,ref),
links='Shape',shape=mobj[0])
obj.ViewObject.DiffuseColor = mobj[1]
obj.Placement = pln
objs.append(obj)
self._log('loaded')
break
if not objs:
continue
pln = Placement(m_at,Rotation(Vector(0,0,1),m_angle))
if at_bottom:
pln = pln.multiply(Placement(Vector(),
Rotation(Vector(1,0,0),180)))
label = '{}#{}'.format(module_idx,ref)
if self.add_feature or combo:
obj = self._makeCompound(objs,'part',label,force=True)
obj.Placement = pln
parts.append(obj)
else:
parts[label] = {'pos':pln, 'models':objs}
if parts:
if combo:
parts = self._makeCompound(parts,'parts')
elif self.add_feature:
grp = self._makeObject('App::DocumentObjectGroup','parts')
| |
epoch end
dataloader_prefix = self.get_test_dataloader_prefix(dataloader_idx)
dataloader_logs = self.multi_test_epoch_end(
test_outputs, dataloader_idx=dataloader_idx
)
# If result was not provided, generate empty dict
dataloader_logs = dataloader_logs or {}
# Perform `test_loss` resolution first (if provided outside logs)
if "test_loss" in dataloader_logs:
if (
"test_loss" not in output_dict
and dataloader_idx == self._test_dl_idx
):
output_dict["test_loss"] = dataloader_logs["test_loss"]
# For every item in the result dictionary
for k, v in dataloader_logs.items():
# If the key is `log`
if k == "log":
# Parse every element of the log, and attach the prefix name of the data loader
log_dict = {}
for k_log, v_log in v.items():
# If we are logging the loss, but dont provide it at result level,
# store it twice - once in log and once in result level.
# Also mark log with prefix name to avoid log level clash with other data loaders
if (
k_log not in output_dict["log"]
and dataloader_idx == self._test_dl_idx
):
new_k_log = k_log
# Also insert duplicate key with prefix for ease of comparison / avoid name clash
log_dict[dataloader_prefix + k_log] = v_log
else:
# Simply prepend prefix to key and save
new_k_log = dataloader_prefix + k_log
log_dict[new_k_log] = v_log
# Update log storage of individual data loader
output_logs = output_dict.get("log", {})
output_logs.update(log_dict)
# Update global log storage
output_dict["log"] = output_logs
else:
# If any values are stored outside 'log', simply prefix name and store
new_k = dataloader_prefix + k
output_dict[new_k] = v
if "log" in output_dict:
self.log_dict(output_dict.pop("log"), on_epoch=True)
# return everything else
return output_dict
def multi_validation_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
Adds support for multiple validation datasets. Should be overriden by subclass,
so as to obtain appropriate logs for each of the dataloaders.
Args:
outputs: Same as that provided by LightningModule.validation_epoch_end()
for a single dataloader.
dataloader_idx: int representing the index of the dataloader.
Returns:
A dictionary of values, optionally containing a sub-dict `log`,
such that the values in the log will be pre-pended by the dataloader prefix.
"""
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_validation_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`validation_epoch_end(outputs)."
)
def multi_test_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
Adds support for multiple test datasets. Should be overriden by subclass,
so as to obtain appropriate logs for each of the dataloaders.
Args:
outputs: Same as that provided by LightningModule.validation_epoch_end()
for a single dataloader.
dataloader_idx: int representing the index of the dataloader.
Returns:
A dictionary of values, optionally containing a sub-dict `log`,
such that the values in the log will be pre-pended by the dataloader prefix.
"""
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_test_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`test_epoch_end(outputs)."
)
def get_validation_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
"""
Get the name of one or more data loaders, which will be prepended to all logs.
Args:
dataloader_idx: Index of the data loader.
Returns:
str name of the data loader at index provided.
"""
return self._validation_names[dataloader_idx]
def get_test_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
"""
Get the name of one or more data loaders, which will be prepended to all logs.
Args:
dataloader_idx: Index of the data loader.
Returns:
str name of the data loader at index provided.
"""
return self._test_names[dataloader_idx]
@rank_zero_only
def maybe_init_from_pretrained_checkpoint(
self, cfg: OmegaConf, map_location: str = "cpu"
):
"""
Initializes a given model with the parameters obtained via specific config arguments.
The state dict of the provided model will be updated with `strict=False` setting so as to prevent
requirement of exact model parameters matching.
Initializations:
init_from_nemo_model: Str path to a .nemo model, which will be instantiated in order
to extract the state dict.
init_from_pretrained_model: Str name of a pretrained model checkpoint (obtained via cloud).
The model will be downloaded (or a cached copy will be used), instantiated and then
its state dict will be extracted.
init_from_ptl_ckpt: Str name of a Pytorch Lightning checkpoint file. It will be loaded and
the state dict will extracted.
Args:
cfg: The config used to instantiate the model. It need only contain one of the above keys.
map_location: str or torch.device() which represents where the intermediate state dict
(from the pretrained model or checkpoint) will be loaded.
"""
args = [
"init_from_nemo_model",
"init_from_pretrained_model",
"init_from_ptl_ckpt",
]
arg_matches = [(1 if arg in cfg and arg is not None else 0) for arg in args]
if sum(arg_matches) == 0:
# model weights do not need to be restored
return
if sum(arg_matches) > 1:
raise ValueError(
f"Cannot pass more than one model initialization arguments to config!\n"
f"Found : {[args[idx] for idx, arg_present in enumerate(arg_matches) if arg_present]}"
)
if "init_from_nemo_model" in cfg and cfg.init_from_nemo_model is not None:
with open_dict(cfg):
# Restore model
model_path = cfg.pop("init_from_nemo_model")
restored_model = self.restore_from(
model_path, map_location=map_location, strict=True
)
# Restore checkpoint into current model
self.load_state_dict(restored_model.state_dict(), strict=False)
logging.info(
f"Model checkpoint restored from nemo file with path : `{model_path}`"
)
del restored_model
if (
"init_from_pretrained_model" in cfg
and cfg.init_from_pretrained_model is not None
):
with open_dict(cfg):
# Restore model
model_name = cfg.pop("init_from_pretrained_model")
# Check if model is being resumed or not - only works if `Trainer` is attached to model
if hasattr(self, "trainer") and self.trainer is not None:
trainer = self.trainer
if (
hasattr(trainer, "resume_from_checkpoint")
and trainer.resume_from_checkpoint is not None
):
logging.info(
"Model training is being resumed via Pytorch Lightning.\n"
"Initialization from pretrained model (via cloud) will be skipped."
)
return
restored_model = self.from_pretrained(
model_name, map_location=map_location, strict=True
)
# Restore checkpoint into current model
self.load_state_dict(restored_model.state_dict(), strict=False)
logging.info(
f"Model checkpoint restored from pretrained chackpoint with name : `{model_name}`"
)
del restored_model
if "init_from_ptl_ckpt" in cfg and cfg.init_from_ptl_ckpt is not None:
with open_dict(cfg):
# Restore checkpoint
ckpt_path = cfg.pop("init_from_ptl_ckpt")
ckpt = torch.load(ckpt_path, map_location=map_location)
# Restore checkpoint into current model
self.load_state_dict(ckpt["state_dict"], strict=False)
logging.info(
f"Model checkpoint restored from pytorch lightning chackpoint with path : `{ckpt_path}`"
)
del ckpt
def teardown(self, stage: str):
"""
Called at the end of fit and test.
Args:
stage: either 'fit' or 'test'
"""
if stage == "fit":
# Update env variable to bypass multi gpu issue after training
# This fix affects usage of trainer.test() after trainer.train()
# If trainer.train() was done on multiple GPUs, then trainer.test()
# will try to do ddp, even if its a new Trainer object with just 1 GPU.
# Temporary patch to fix that
if "PL_TRAINER_GPUS" in os.environ:
os.environ.pop("PL_TRAINER_GPUS")
super().teardown(stage)
@classmethod
def extract_state_dict_from(
cls, restore_path: str, save_dir: str, split_by_module: bool = False
):
"""
Extract the state dict(s) from a provided .nemo tarfile and save it to a directory.
Args:
restore_path: path to .nemo file from which state dict(s) should be extracted
save_dir: directory in which the saved state dict(s) should be stored
split_by_module: bool flag, which determins whether the output checkpoint should
be for the entire Model, or the individual module's that comprise the Model
Example:
To convert the .nemo tarfile into a single Model level PyTorch checkpoint
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts')
To restore a model from a Model level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
model.load_state_dict(torch.load("./asr_ckpts/model_weights.ckpt"))
To convert the .nemo tarfile into multiple Module level PyTorch checkpoints
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts', split_by_module=True)
To restore a module from a Module level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
# load the individual components
model.preprocessor.load_state_dict(torch.load("./asr_ckpts/preprocessor.ckpt"))
model.encoder.load_state_dict(torch.load("./asr_ckpts/encoder.ckpt"))
model.decoder.load_state_dict(torch.load("./asr_ckpts/decoder.ckpt"))
Returns:
The state dict that was loaded from the original .nemo checkpoint
"""
if not path.exists(restore_path):
raise FileExistsError(f"Can't find {restore_path}")
cwd = os.getcwd()
save_dir | |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `schema_converter` module."""
from __future__ import absolute_import
from collections import OrderedDict
from typing import List, Union # pylint: disable=unused-import
import unittest
import avro
from apache_beam.io.gcp.internal.clients import bigquery
from vcf import parser
from vcf.parser import field_counts
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs import schema_converter
from gcp_variant_transforms.libs import processed_variant
from gcp_variant_transforms.libs.bigquery_util import ColumnKeyConstants
from gcp_variant_transforms.libs.bigquery_util import TableFieldConstants
from gcp_variant_transforms.libs.variant_merge import variant_merge_strategy
from gcp_variant_transforms.testing import bigquery_schema_util
# Aliases for PyVCF's private header-metadata tuple types, used to build
# test VCF header fields below.
Format = parser._Format
Info = parser._Info
class _DummyVariantMergeStrategy(variant_merge_strategy.VariantMergeStrategy):
  """A do-almost-nothing merge strategy for schema-modification tests.

  Its only visible effect is appending one nullable STRING field named
  'ADDED_BY_MERGER' to the given schema.
  """

  def modify_bigquery_schema(self, schema, info_keys):
    extra_field = bigquery.TableFieldSchema(
        name='ADDED_BY_MERGER',
        type=TableFieldConstants.TYPE_STRING,
        mode=TableFieldConstants.MODE_NULLABLE)
    schema.fields.append(extra_field)
class GenerateSchemaFromHeaderFieldsTest(unittest.TestCase):
  """Test cases for the ``generate_schema_from_header_fields`` function."""

  def _validate_schema(self, expected_fields, actual_schema):
    """This can be overridden by child classes to do more validations.

    This is called at the end of each test to verify that `actual_schema`
    has all the `expected_fields`.
    """
    self.assertEqual(expected_fields, _get_fields_from_schema(actual_schema))

  def _generate_expected_fields(self, alt_fields=None, call_fields=None,
                                info_fields=None):
    """Returns the flattened (dot-joined) field names the schema must have.

    The fixed columns are always present; `alt_fields` are nested under
    alternate_bases, `call_fields` under calls, and `info_fields` are
    top-level INFO columns.
    """
    fields = [ColumnKeyConstants.REFERENCE_NAME,
              ColumnKeyConstants.START_POSITION,
              ColumnKeyConstants.END_POSITION,
              ColumnKeyConstants.REFERENCE_BASES,
              ColumnKeyConstants.ALTERNATE_BASES,
              '.'.join([ColumnKeyConstants.ALTERNATE_BASES,
                        ColumnKeyConstants.ALTERNATE_BASES_ALT])]
    fields.extend(
        ['.'.join([ColumnKeyConstants.ALTERNATE_BASES, a])
         for a in alt_fields or []])
    fields.extend([ColumnKeyConstants.NAMES,
                   ColumnKeyConstants.QUALITY,
                   ColumnKeyConstants.FILTER,
                   ColumnKeyConstants.CALLS,
                   '.'.join([ColumnKeyConstants.CALLS,
                             ColumnKeyConstants.CALLS_NAME]),
                   '.'.join([ColumnKeyConstants.CALLS,
                             ColumnKeyConstants.CALLS_GENOTYPE]),
                   '.'.join([ColumnKeyConstants.CALLS,
                             ColumnKeyConstants.CALLS_PHASESET])])
    fields.extend(
        ['.'.join([ColumnKeyConstants.CALLS, c]) for c in call_fields or []])
    fields.extend(info_fields or [])
    return fields

  def test_no_header_fields(self):
    header_fields = vcf_header_io.VcfHeader()
    self._validate_schema(
        self._generate_expected_fields(),
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(header_fields)))

  def test_info_header_fields(self):
    infos = OrderedDict([
        ('I1', Info('I1', 1, 'String', 'desc', 'src', 'v')),
        ('I2', Info('I2', 2, 'Integer', 'desc', 'src', 'v')),
        ('IA', Info('IA', field_counts['A'], 'Float', 'desc', 'src', 'v')),
        ('IU', Info('IU', field_counts['.'], 'Character', 'desc', 'src', 'v')),
        ('IG', Info('IG', field_counts['G'], 'String', 'desc', 'src', 'v')),
        ('I0', Info('I0', 0, 'Flag', 'desc', 'src', 'v')),
        ('IA2', Info('IA2', field_counts['A'], 'Float', 'desc', 'src', 'v')),
        ('END',  # END should not be included in the generated schema.
         Info('END', 1, 'Integer', 'Special END key', 'src', 'v'))])
    header_fields = vcf_header_io.VcfHeader(infos=infos)
    self._validate_schema(
        self._generate_expected_fields(
            alt_fields=['IA', 'IA2'],
            info_fields=['I1', 'I2', 'IU', 'IG', 'I0']),
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(header_fields)))
    # Test with split_alternate_allele_info_fields=False.
    actual_schema = (
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(
                header_fields, split_alternate_allele_info_fields=False)))
    self._validate_schema(
        self._generate_expected_fields(
            info_fields=['I1', 'I2', 'IA', 'IU', 'IG', 'I0', 'IA2']),
        actual_schema)
    # Verify types and modes.
    expected_type_modes = {
        'I1': (TableFieldConstants.TYPE_STRING,
               TableFieldConstants.MODE_NULLABLE),
        'I2': (TableFieldConstants.TYPE_INTEGER,
               TableFieldConstants.MODE_REPEATED),
        'IA': (TableFieldConstants.TYPE_FLOAT,
               TableFieldConstants.MODE_REPEATED),
        'IU': (TableFieldConstants.TYPE_STRING,
               TableFieldConstants.MODE_REPEATED),
        'IG': (TableFieldConstants.TYPE_STRING,
               TableFieldConstants.MODE_REPEATED),
        'I0': (TableFieldConstants.TYPE_BOOLEAN,
               TableFieldConstants.MODE_NULLABLE),
        'IA2': (TableFieldConstants.TYPE_FLOAT,
                TableFieldConstants.MODE_REPEATED)}
    for field in actual_schema.fields:
      if field.name in expected_type_modes:
        expected_type, expected_mode = expected_type_modes[field.name]
        self.assertEqual(expected_type, field.type)
        self.assertEqual(expected_mode, field.mode)

  def test_info_and_format_header_fields(self):
    infos = OrderedDict([
        ('I1', Info('I1', 1, 'String', 'desc', 'src', 'v')),
        ('IA', Info('IA', field_counts['A'], 'Integer', 'desc', 'src', 'v'))])
    # GT and PS should not be set as they're already included in special
    # 'genotype' and 'phaseset' fields.
    formats = OrderedDict([
        ('F1', Format('F1', 1, 'String', 'desc')),
        ('F2', Format('F2', 2, 'Integer', 'desc')),
        ('FU', Format('FU', field_counts['.'], 'Float', 'desc')),
        ('GT', Format('GT', 2, 'Integer', 'Special GT key')),
        ('PS', Format('PS', 1, 'Integer', 'Special PS key'))])
    header_fields = vcf_header_io.VcfHeader(infos=infos, formats=formats)
    self._validate_schema(
        self._generate_expected_fields(
            alt_fields=['IA'],
            call_fields=['F1', 'F2', 'FU'],
            info_fields=['I1']),
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(header_fields)))

  def test_bigquery_field_name_sanitize(self):
    # Bug fix: 'OK_info_09' previously used the 4-field `Format` namedtuple
    # inside the *infos* dict; every other INFO entry uses the 6-field
    # `Info` namedtuple (with source and version), so use `Info` here too.
    infos = OrderedDict([
        ('_', Info('_', 1, 'String', 'desc', 'src', 'v')),
        ('_A', Info('_A', 1, 'String', 'desc', 'src', 'v')),
        ('0a', Info('0a', 1, 'String', 'desc', 'src', 'v')),
        ('A-B*C', Info('A-B*C', 1, 'String', 'desc', 'src', 'v')),
        ('I-A', Info('I-A', field_counts['A'], 'Float', 'desc', 'src', 'v')),
        ('OK_info_09', Info('OK_info_09', 1, 'String', 'desc', 'src', 'v'))])
    formats = OrderedDict([
        ('a^b', Format('a^b', 1, 'String', 'desc')),
        ('OK_format_09', Format('OK_format_09', 1, 'String', 'desc'))])
    header_fields = vcf_header_io.VcfHeader(infos=infos, formats=formats)
    self._validate_schema(
        self._generate_expected_fields(
            alt_fields=['I_A'],
            call_fields=['a_b', 'OK_format_09'],
            info_fields=['field__', 'field__A', 'field_0a', 'A_B_C',
                         'OK_info_09']),
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(header_fields)))

  def test_variant_merger_modify_schema(self):
    infos = OrderedDict([
        ('I1', Info('I1', 1, 'String', 'desc', 'src', 'v')),
        ('IA', Info('IA', field_counts['A'], 'Integer', 'desc', 'src', 'v'))])
    formats = OrderedDict([('F1', Format('F1', 1, 'String', 'desc'))])
    header_fields = vcf_header_io.VcfHeader(infos=infos, formats=formats)
    self._validate_schema(
        self._generate_expected_fields(
            alt_fields=['IA'],
            call_fields=['F1'],
            info_fields=['I1', 'ADDED_BY_MERGER']),
        schema_converter.generate_schema_from_header_fields(
            header_fields,
            processed_variant.ProcessedVariantFactory(header_fields),
            variant_merger=_DummyVariantMergeStrategy()))
class ConvertTableSchemaToJsonAvroSchemaTest(
    GenerateSchemaFromHeaderFieldsTest):
  """Test cases for `convert_table_schema_to_json_avro_schema`.

  Reuses every test of GenerateSchemaFromHeaderFieldsTest: each BigQuery
  table schema those tests build is additionally converted to an Avro
  schema here and checked for the same set of fields.
  """

  def _validate_schema(self, expected_fields, actual_schema):
    # First run the parent's BigQuery-schema validation unchanged.
    super(ConvertTableSchemaToJsonAvroSchemaTest, self)._validate_schema(
        expected_fields, actual_schema)
    # Then round-trip through the JSON Avro representation and compare.
    json_avro_schema = (
        schema_converter.convert_table_schema_to_json_avro_schema(
            actual_schema))
    parsed_avro_schema = avro.schema.parse(json_avro_schema)
    self.assertEqual(expected_fields,
                     _get_fields_from_avro_type(parsed_avro_schema, ''))
class GenerateHeaderFieldsFromSchemaTest(unittest.TestCase):
  """Test cases for the `generate_header_fields_from_schema` function."""
  # NOTE(review): these tests call the private helpers
  # schema_converter._add_info_fields / _add_format_fields directly, so they
  # are coupled to that module's internals by design.

  def test_add_info_fields_from_alternate_bases_reserved_field(self):
    # Reserved INFO field (AF) nested under alternate_bases: a non-empty
    # BigQuery description is kept; an empty one falls back to the
    # reserved field's standard VCF description.
    alternate_bases_record_with_desc = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each alternate base (if any).')
    alternate_bases_record_with_desc.fields.append(bigquery.TableFieldSchema(
        name='AF',
        type=bigquery_util.TableFieldConstants.TYPE_FLOAT,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='bigquery desc'))
    infos_with_desc = OrderedDict()
    schema_converter._add_info_fields(
        alternate_bases_record_with_desc, infos_with_desc)
    expected_infos = OrderedDict([
        ('AF', Info('AF', field_counts['A'], 'Float', 'bigquery desc',
                    None, None))])
    self.assertEqual(infos_with_desc, expected_infos)
    alternate_bases_record_no_desc = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each alternate base (if any).')
    alternate_bases_record_no_desc.fields.append(bigquery.TableFieldSchema(
        name='AF',
        type=bigquery_util.TableFieldConstants.TYPE_FLOAT,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description=''))
    infos_no_desc = OrderedDict()
    schema_converter._add_info_fields(
        alternate_bases_record_no_desc, infos_no_desc)
    expected_infos = OrderedDict([
        ('AF', Info('AF', field_counts['A'], 'Float',
                    'Allele frequency for each ALT allele in the same order '
                    'as listed (estimated from primary data, not called '
                    'genotypes', None, None))])
    self.assertEqual(infos_no_desc, expected_infos)

  def test_add_info_fields_from_alternate_bases_schema_compatibility(self):
    # AF declared as INTEGER conflicts with the reserved Float definition:
    # must raise unless allow_incompatible_schema=True.
    schema_conflict_info = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each alternate base (if any).')
    schema_conflict_info.fields.append(bigquery.TableFieldSchema(
        name='AF',
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc'))
    with self.assertRaises(ValueError):
      schema_converter._add_info_fields(schema_conflict_info,
                                        OrderedDict())
    infos_allow_incompatible_schema = OrderedDict()
    schema_converter._add_info_fields(
        schema_conflict_info,
        infos_allow_incompatible_schema,
        allow_incompatible_schema=True)
    expected_infos = OrderedDict([
        ('AF', Info('AF', field_counts['A'], 'Integer', 'desc', None, None))])
    self.assertEqual(infos_allow_incompatible_schema, expected_infos)

  def test_add_info_fields_from_alternate_bases_non_reserved_field(self):
    # Non-reserved sub-fields of alternate_bases still get per-alternate
    # count ('A') but keep their BigQuery type and description as-is.
    alternate_bases_record = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each alternate base (if any).')
    alternate_bases_record.fields.append(bigquery.TableFieldSchema(
        name='non_reserved',
        type=bigquery_util.TableFieldConstants.TYPE_FLOAT,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='bigquery desc'))
    infos = OrderedDict()
    schema_converter._add_info_fields(
        alternate_bases_record, infos)
    expected_infos = OrderedDict([
        ('non_reserved', Info('non_reserved', field_counts['A'], 'Float',
                              'bigquery desc', None, None))])
    self.assertEqual(infos, expected_infos)

  def test_add_info_fields_reserved_field(self):
    # Top-level reserved INFO field (AA): empty description falls back to
    # the VCF-reserved description ('Ancestral allele').
    field_with_desc = bigquery.TableFieldSchema(
        name='AA',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='bigquery desc')
    infos = OrderedDict()
    schema_converter._add_info_fields(field_with_desc, infos)
    expected_infos = OrderedDict([
        ('AA', Info('AA', 1, 'String', 'bigquery desc', None, None))])
    self.assertEqual(infos, expected_infos)
    field_without_desc = bigquery.TableFieldSchema(
        name='AA',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='')
    infos = OrderedDict()
    schema_converter._add_info_fields(field_without_desc, infos)
    expected_infos = OrderedDict([
        ('AA', Info('AA', 1, 'String', 'Ancestral allele', None, None))])
    self.assertEqual(infos, expected_infos)

  def test_add_info_fields_reserved_field_schema_compatibility(self):
    # Type conflict (Integer vs reserved String) and mode conflict
    # (REPEATED vs reserved singular) both raise; the override flag keeps
    # the incompatible definition, mapping REPEATED to count '.'.
    field_conflict_info_type = bigquery.TableFieldSchema(
        name='AA',
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc')
    with self.assertRaises(ValueError):
      schema_converter._add_info_fields(field_conflict_info_type,
                                        OrderedDict())
    field_conflict_info_format = bigquery.TableFieldSchema(
        name='AA',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='desc')
    with self.assertRaises(ValueError):
      schema_converter._add_info_fields(field_conflict_info_format,
                                        OrderedDict())
    info_allow_incompatible_schema = OrderedDict()
    schema_converter._add_info_fields(
        field_conflict_info_format,
        info_allow_incompatible_schema,
        allow_incompatible_schema=True)
    expected_infos = OrderedDict([
        ('AA', Info('AA', field_counts['.'], 'String', 'desc', None, None))])
    self.assertEqual(info_allow_incompatible_schema, expected_infos)

  def test_add_info_fields_non_reserved_field(self):
    non_reserved_field = bigquery.TableFieldSchema(
        name='non_reserved_info',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='')
    infos = OrderedDict()
    schema_converter._add_info_fields(non_reserved_field, infos)
    expected_infos = OrderedDict([
        ('non_reserved_info', Info('non_reserved_info', 1, 'String', '',
                                   None, None))])
    self.assertEqual(infos, expected_infos)

  def test_add_format_fields_reserved_field(self):
    # Reserved FORMAT field (GQ) under calls: empty description falls back
    # to the reserved description ('Conditional genotype quality').
    calls_record_with_desc = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.CALLS,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each call.')
    calls_record_with_desc.fields.append(bigquery.TableFieldSchema(
        name='GQ',
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='bigquery desc'))
    formats = OrderedDict()
    schema_converter._add_format_fields(calls_record_with_desc,
                                        formats)
    expected_formats = OrderedDict([
        ('GQ', Format('GQ', 1, 'Integer', 'bigquery desc'))])
    self.assertEqual(formats, expected_formats)
    calls_record_without_desc = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.CALLS,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each call.')
    calls_record_without_desc.fields.append(bigquery.TableFieldSchema(
        name='GQ',
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description=''))
    formats = OrderedDict()
    schema_converter._add_format_fields(calls_record_without_desc,
                                        formats)
    expected_formats = OrderedDict([
        ('GQ', Format('GQ', 1, 'Integer', 'Conditional genotype quality'))])
    self.assertEqual(formats, expected_formats)

  def test_add_format_fields_reserved_field_schema_compatibility(self):
    # GQ as STRING conflicts with the reserved Integer definition; the
    # public entry point raises, and the private helper accepts it only
    # with allow_incompatible_schema=True.
    schema_conflict_format = bigquery.TableSchema()
    calls_record = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.CALLS,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each call.')
    calls_record.fields.append(bigquery.TableFieldSchema(
        name='GQ',
        type=bigquery_util.TableFieldConstants.TYPE_STRING,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='desc'))
    schema_conflict_format.fields.append(calls_record)
    with self.assertRaises(ValueError):
      schema_converter.generate_header_fields_from_schema(
          schema_conflict_format)
    formats_allow_incompatible_schema = OrderedDict()
    schema_converter._add_format_fields(
        calls_record,
        formats_allow_incompatible_schema,
        allow_incompatible_schema=True)
    expected_formats = OrderedDict([
        ('GQ', Format('GQ', 1, 'String', 'desc'))])
    self.assertEqual(formats_allow_incompatible_schema, expected_formats)

  def test_add_format_fields_non_reserved_field(self):
    calls_record = bigquery.TableFieldSchema(
        name=bigquery_util.ColumnKeyConstants.CALLS,
        type=bigquery_util.TableFieldConstants.TYPE_RECORD,
        mode=bigquery_util.TableFieldConstants.MODE_REPEATED,
        description='One record for each call.')
    calls_record.fields.append(bigquery.TableFieldSchema(
        name='non_reserved_format',
        type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='bigquery desc'))
    formats = OrderedDict()
    schema_converter._add_format_fields(calls_record, formats)
    expected_formats = OrderedDict([
        ('non_reserved_format', Format('non_reserved_format', 1, 'Integer',
                                       'bigquery desc'))])
    self.assertEqual(formats, expected_formats)

  def test_generate_header_fields_from_schema(self):
    # End-to-end conversion of the shared sample schema back to VCF
    # header fields.
    sample_schema = bigquery_schema_util.get_sample_table_schema()
    header = schema_converter.generate_header_fields_from_schema(
        sample_schema)
    infos = OrderedDict([
        ('AF', Info('AF', field_counts['A'], 'Float', 'desc', None, None)),
        ('AA', Info('AA', 1, 'String', 'desc', None, None)),
        ('IFR', Info('IFR', field_counts['.'], 'Float', 'desc', None, None)),
        ('IS', Info('IS', 1, 'String', 'desc', None, None))])
    formats = OrderedDict([
        ('FB', parser._Format('FB', 0, 'Flag', 'desc')),
        ('GQ', parser._Format('GQ', 1, 'Integer',
                              'desc'))])
    expected_header = vcf_header_io.VcfHeader(infos=infos, formats=formats)
    self.assertEqual(header, expected_header)

  def test_generate_header_fields_from_schema_with_annotation(self):
    # Annotation (CSQ) columns are folded back into a single INFO field
    # whose description carries the 'Format: ...' suffix.
    sample_schema = bigquery_schema_util.get_sample_table_schema(
        with_annotation_fields=True)
    header = schema_converter.generate_header_fields_from_schema(
        sample_schema)
    infos = OrderedDict([
        ('AF', Info('AF', field_counts['A'], 'Float', 'desc', None, None)),
        ('CSQ', Info('CSQ', field_counts['.'], 'String',
                     'desc Format: Consequence|IMPACT', None, None)),
        ('AA', Info('AA', 1, 'String', 'desc', None, None)),
        ('IFR', Info('IFR', field_counts['.'], 'Float', 'desc', None, None)),
        ('IS', Info('IS', 1, 'String', 'desc', None, None))])
    formats = OrderedDict([
        ('FB', parser._Format('FB', 0, 'Flag', 'desc')),
        ('GQ', parser._Format('GQ', 1, 'Integer',
                              'desc'))])
    expected_header = vcf_header_io.VcfHeader(infos=infos, formats=formats)
    self.assertEqual(header, expected_header)

  def test_generate_header_fields_from_schema_date_type(self):
    # Non-VCF housekeeping columns (e.g. the DATE partition column) must
    # be ignored, yielding an empty header.
    schema = bigquery.TableSchema()
    schema.fields.append(bigquery.TableFieldSchema(
        name='partition_date_please_ignore',
        type='Date',
        mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
        description='Column required by BigQuery partitioning logic.'))
    header = schema_converter.generate_header_fields_from_schema(
        schema)
    expected_header = vcf_header_io.VcfHeader(infos=OrderedDict(),
                                              formats=OrderedDict())
    self.assertEqual(header, expected_header)
def | |
#-*- coding: UTF-8 -*-
import re
open_id_pattern = re.compile("^\d{11}$")
def join_activity(mysql_conn, parameters):
    """Enrolls an investor (looked up by phone-number open id) in an activity.

    :param mysql_conn: An open MySQL connection (mysql-connector style; must
        support is_connected/start_transaction/cursor/commit).
    :param parameters: Dict with keys "id" (11-digit phone number) and
        "activity" (4-character activity code).
    :returns: Dict {"kind": "joinActivity", "code": "0"|"-1", "response": ...};
        on failure response carries an "error" message.
    """
    open_id = parameters.get("id")
    activity = parameters.get("activity")
    code = "0"
    response = {"id": open_id, "activity": activity}
    result = {"kind": "joinActivity", "code": code, "response": response}
    # Input validation; note later checks overwrite `error`, so only the
    # last failing check's message is reported.
    if open_id is None:
        code = "-1"
        error = "请输入手机号"
    elif not open_id_pattern.match(open_id):
        code = "-1"
        error = "手机号格式错误"
    if activity is None:
        code = "-1"
        error = "请输入赛事代码"
    elif len(activity) != 4:
        code = "-1"
        error = "赛事代码应为4位"
    if mysql_conn is None or not mysql_conn.is_connected():
        code = "-1"
        error = "系统内部错误"
    if code == "-1":
        response.update({"error": error})
        result.update({"code": code, "response": response})
        return result
    mysql_conn.set_charset_collation('utf8')
    mysql_conn.start_transaction()
    cursor = mysql_conn.cursor()
    sql = '''SELECT investorid FROM siminfo.t_investor WHERE openid = %s'''
    cursor.execute(sql, (open_id,))
    row = cursor.fetchone()
    if row is None:
        code = "-1"
        error = "投资者尚未开户"
        response.update({"error": error})
        result.update({"code": code, "response": response})
    else:
        investor_id = str(row[0])
        sql = '''SELECT activityid FROM siminfo.t_activity WHERE activityid = %s'''
        cursor.execute(sql, (activity,))
        row = cursor.fetchone()
        if row is None:
            code = "-1"
            error = "赛事活动不存在"
            response.update({"error": error})
            result.update({"code": code, "response": response})
        else:
            sql = '''SELECT activityid, investorid, joindate FROM siminfo.t_activityinvestor WHERE activityid = %s AND investorid = %s'''
            cursor.execute(sql, (activity, investor_id))
            row = cursor.fetchone()
            # NOTE(review): if the investor already joined (row is not None)
            # nothing happens and code stays "0" — presumably an intentional
            # idempotent success; confirm.
            if row is None:
                # Reject if the investor is already in another active
                # activity sharing a settlement group with this one.
                sql = """SELECT settlementgroupid FROM siminfo.t_activitysettlementgroup
                      WHERE activityid = %s AND settlementgroupid IN(
                      SELECT DISTINCT settlementgroupid FROM siminfo.t_activitysettlementgroup t
                      WHERE t.activityid IN
                      (SELECT t1.activityid FROM siminfo.t_activityinvestor t1, siminfo.t_activity t2
                      WHERE t1.investorid = %s AND t1.activityid = t2.activityid AND (t2.activitystatus = '0' OR t2.activitystatus = '1')))"""
                cursor.execute(sql, (activity, investor_id))
                cursor.fetchall()
                if cursor.rowcount > 0:
                    code = "-1"
                    error = "投资者已参加其他相似类型赛事活动"
                    response.update({"error": error})
                    result.update({"code": code, "response": response})
                else:
                    # Fetch the current trading day.
                    # NOTE(review): current_trading_day is assigned but never
                    # used below — dead code or a missing feature; confirm.
                    sql = """SELECT DISTINCT t1.tradingday FROM siminfo.t_tradesystemtradingday t1, siminfo.t_tradesystemsettlementgroup t2, siminfo.t_activitysettlementgroup t3
                          WHERE t1.tradesystemid = t2.tradesystemid AND t2.settlementgroupid = t3.settlementgroupid AND t3.activityid = %s"""
                    cursor.execute(sql, (activity,))
                    row = cursor.fetchone()
                    current_trading_day = str(row[0])
                    # Check the activity's status and initial balance.
                    sql = """SELECT activitystatus, initialbalance FROM siminfo.t_activity WHERE activityid = %s"""
                    cursor.execute(sql, (activity,))
                    row = cursor.fetchone()
                    activity_status = str(row[0])
                    initial_balance = str(row[1])
                    join_status = '0'
                    # For a running activity ('1'), only investors with a
                    # pristine account (initial balance, no positions) are
                    # activated immediately; others join with status '0'.
                    if activity_status == '1':
                        sql = """SELECT t1.investorid FROM siminfo.t_investorfund t1
                              WHERE t1.brokersystemid = (SELECT DISTINCT t2.brokersystemid
                              FROM siminfo.t_activitysettlementgroup t1, siminfo.t_brokersystemsettlementgroup t2 WHERE t1.settlementgroupid = t2.settlementgroupid AND t1.activityid = %s)
                              AND t1.investorid = %s AND (t1.balance <> %s OR t1.available <> %s OR t1.currmargin <> 0 OR t1.profit <> 0 OR t1.stockvalue <> 0)
                              UNION
                              SELECT DISTINCT t2.investorid FROM siminfo.t_clientposition t1, siminfo.t_investorclient t2, (SELECT settlementgroupid FROM siminfo.t_activitysettlementgroup WHERE activityid = %s) t3
                              WHERE t2.investorid = %s AND t1.clientid = t2.clientid AND t1.settlementgroupid = t2.settlementgroupid AND t2.settlementgroupid = t3.settlementgroupid AND t1.position > 0"""
                        cursor.execute(sql, (activity,investor_id,initial_balance,initial_balance,activity,investor_id))
                        cursor.fetchall()
                        if cursor.rowcount == 0:
                            sql = """INSERT INTO siminfo.t_activityinvestorevaluation(ActivityID,InvestorID,InitialAsset,PreAsset,CurrentAsset,TotalReturnRate,ReturnRateOf1Day)
                                  SELECT t2.activityid, t1.investorid, SUM(t1.balance) AS initialasset, SUM(t1.balance) AS preasset, SUM(t1.balance) AS currasset, 0, 0 FROM siminfo.t_investorfund t1,
                                  (SELECT DISTINCT t1.activityid, t2.brokersystemid FROM siminfo.t_activitysettlementgroup t1, siminfo.t_brokersystemsettlementgroup t2 WHERE t1.activityid = %s AND t1.settlementgroupid = t2.settlementgroupid) t2
                                  WHERE t1.investorid = %s AND t1.brokersystemid = t2.brokersystemid
                                  GROUP BY t2.activityid, t1.investorid"""
                            cursor.execute(sql, (activity, investor_id))
                            join_status = '1'
                    # NOTE(review): the literal %Y%m%d inside this
                    # parameterized statement may need escaping as %%Y%%m%%d
                    # with pyformat-style drivers — confirm against the
                    # connector in use.
                    sql = """INSERT INTO siminfo.t_activityinvestor(activityid, investorid, joindate, joinstatus) VALUES(%s, %s, DATE_FORMAT(NOW(), '%Y%m%d'), %s)"""
                    cursor.execute(sql, (activity, investor_id, join_status))
                    if cursor.rowcount == 0:
                        code = "-1"
                        error = "参加赛事活动失败"
                        response.update({"error": error})
                        result.update({"code": code, "response": response})
    # NOTE(review): commit runs on error paths too (the transaction is then
    # empty or partial); there is no rollback/exception handling — confirm.
    mysql_conn.commit()
    return result
def query_activity_ranking(mysql_conn, parameters):
    """Queries the investor ranking list of an activity.

    :param mysql_conn: An open MySQL connection (mysql-connector style).
    :param parameters: Dict with keys "activity" (4-char code), optional
        "investor", "type" ('00' settled ranking, '01' live ranking,
        '99' single investor) and "count" (page size; 0/None means default 30).
    :returns: Dict {"kind": "queryActivityRanking", "code": ..., "response":
        {... "data": [ranking rows]}}; on failure response carries "error".
    """
    activity_id = parameters.get("activity")
    investor_id = parameters.get("investor")
    query_type = parameters.get("type")
    query_count = parameters.get("count")
    code = "0"
    response = {"activity": activity_id, "investor": investor_id, "type": query_type, "count": query_count}
    result = {"kind": "queryActivityRanking", "code": code, "response": response}
    if activity_id is None:
        code = "-1"
        error = "请输入赛事编号"
    elif len(activity_id) != 4:
        code = "-1"
        error = "赛事代码应为4位"
    if query_type not in ['00', '01', '99']:
        code = "-1"
        error = "查询类型仅支持00、01、99"
    # Bug fix: type '99' (single-investor query) requires the *investor* id;
    # the original condition re-tested activity_id, so a missing investor
    # was never reported.
    if query_type == '99' and (investor_id is None or investor_id == ""):
        code = "-1"
        error = "请输入投资者代码"
    if query_count is None:
        query_count = 30
    if mysql_conn is None or not mysql_conn.is_connected():
        code = "-1"
        error = "系统内部错误"
    if code == "-1":
        response.update({"error": error})
        result.update({"code": code, "response": response})
        return result
    mysql_conn.set_charset_collation('utf8')
    cursor = mysql_conn.cursor()
    # When an investor is given, make sure the account exists first.
    if investor_id is not None and investor_id != "":
        sql = '''SELECT investorid FROM siminfo.t_investor WHERE investorid = %s'''
        cursor.execute(sql, (investor_id,))
        row = cursor.fetchone()
        if row is None:
            code = "-1"
            error = "投资者尚未开户"
            response.update({"error": error})
            result.update({"code": code, "response": response})
            return result
    rows = None
    # Type '99': this investor's own evaluation row only.
    if query_type == '99' and investor_id is not None and investor_id != "":
        sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
              FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
              WHERE t.activityid = %s AND t.investorid = %s AND t.investorid = t1.investorid"""
        cursor.execute(sql, (activity_id, investor_id,))
        rows = cursor.fetchall()
    # Type '00': stored (settled) ranking, optionally including the given
    # investor's row even when outside the top `query_count`.
    if query_type == '00':
        if investor_id is not None and investor_id != "":
            sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
                  FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
                  WHERE t.activityid = %s AND ((t.rankingstatus = '1' AND (t.ranking <= %s OR %s = '0')) OR t.investorid = %s) AND t.investorid = t1.investorid
                  ORDER BY t.rankingstatus DESC, t.ranking"""
            cursor.execute(sql, (activity_id, query_count, query_count, investor_id))
            rows = cursor.fetchall()
        else:
            sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
                  FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
                  WHERE t.activityid = %s AND t.rankingstatus = '1' AND (t.ranking <= %s OR %s = '0') AND t.investorid = t1.investorid
                  ORDER BY t.rankingstatus DESC, t.ranking"""
            cursor.execute(sql, (activity_id, query_count, query_count))
            rows = cursor.fetchall()
    # Type '01': live ranking recomputed on the fly via a MySQL user
    # variable (@i) ordered by 1-day return, total return, asset.
    if query_type == '01':
        if investor_id is not None and investor_id != "":
            sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, 0 as preranking, t.newranking AS ranking
                  FROM (SELECT t.* FROM
                  (SELECT t.*, (@i:=@i+1) AS newranking FROM siminfo.t_activityinvestorevaluation t,(SELECT @i:=0) AS it
                  WHERE t.activityid = %s AND t.rankingstatus = '1'
                  ORDER BY t.returnrateof1day DESC, t.totalreturnrate DESC, t.currentasset DESC, t.investorid) t WHERE t.newranking <= %s OR %s = '0'
                  UNION ALL
                  SELECT t.*, 0 AS newranking FROM siminfo.t_activityinvestorevaluation t
                  WHERE t.activityid = %s AND t.rankingstatus = '0' AND t.investorid = %s
                  ) t, siminfo.t_investor t1 WHERE t.investorid = t1.investorid"""
            cursor.execute(sql, (activity_id, query_count, query_count, activity_id, investor_id))
            rows = cursor.fetchall()
        else:
            sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, 0 as preranking, t.newranking AS ranking
                  FROM (SELECT t.*, (@i:=@i+1) AS newranking FROM siminfo.t_activityinvestorevaluation t,(SELECT @i:=0) AS it
                  WHERE t.activityid = %s AND t.rankingstatus = '1'
                  ORDER BY t.returnrateof1day DESC, t.totalreturnrate DESC, t.currentasset DESC, t.investorid) t, siminfo.t_investor t1 WHERE (t.newranking <= %s OR %s = '0') AND t.investorid = t1.investorid"""
            cursor.execute(sql, (activity_id, query_count, query_count))
            rows = cursor.fetchall()
    data = []
    if rows is not None:
        for row in rows:
            data.append({"investorId": str(row[0]),"investorName": str(row[1]),"initialAsset": str(row[2]),"preAsset": str(row[3]),
                         "currentAsset": str(row[4]),"totalReturnRate": str(row[5]),"returnRateOf1Day": str(row[6]),"rankingStatus": str(int(row[7])),
                         "preRanking": str(int(row[8])),"ranking": str(int(row[9]))})
    response.update({"data": data})
    result.update({"code": code, "response": response})
    return result
def query_activity_joinstatus(mysql_conn, parameters):
activity_id = parameters.get("activity")
open_id = parameters.get("id")
code = "0"
response = {"activity": activity_id, "id": open_id, "status" : "-1"}
result = {"kind": "queryActivityJoinStatus", "code": code, "response": response}
if open_id is None or open_id == "":
code = "-1"
error = "请输入手机号"
elif not open_id_pattern.match(open_id):
code = "-1"
error = "手机号格式错误"
if activity_id is None or activity_id == "":
code = "-1"
error = "请输入赛事代码"
elif len(activity_id) != 4:
code = "-1"
error = "赛事代码应为4位"
if mysql_conn is None or not mysql_conn.is_connected():
code = "-1"
error = "系统内部错误"
if code == "-1":
response.update({"error": error})
result.update({"code": code, "response": response})
return result
mysql_conn.set_charset_collation('utf8')
cursor = mysql_conn.cursor()
sql = '''SELECT investorid FROM siminfo.t_investor WHERE openid = %s'''
cursor.execute(sql, (open_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "投资者尚未开户"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
investor_id = str(row[0])
sql = '''SELECT activityid FROM siminfo.t_activity WHERE activityid = %s'''
cursor.execute(sql, (activity_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "赛事活动不存在"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
sql = '''SELECT activityid, investorid, joindate FROM siminfo.t_activityinvestor WHERE activityid = %s AND investorid = %s'''
cursor.execute(sql, (activity_id, investor_id))
row = cursor.fetchone()
if row is not None:
response.update({"status": "1"})
result.update({"code": | |
<reponame>MaxGhenis/taxcalc-helpers
from typing import Callable, Union
from functools import wraps
import warnings
import copy
import numpy as np
import pandas as pd
class MicroSeries(pd.Series):
    def __init__(self, *args, weights: np.array = None, **kwargs):
        """A Series-inheriting class for weighted microdata.

        Weights can be provided at initialisation, or using set_weights.

        :param weights: Array of weights; ``None`` means every row gets a
            unit weight (see :meth:`set_weights`).
        :type weights: np.array
        """
        super().__init__(*args, **kwargs)
        # Always goes through set_weights so self.weights is a float Series.
        self.set_weights(weights)
def weighted_function(fn: Callable) -> Callable:
@wraps(fn)
def safe_fn(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ZeroDivisionError:
return np.NaN
return safe_fn
    @weighted_function
    def scalar_function(fn: Callable) -> Callable:
        # Tags ``fn`` as scalar-returning (read via ``_rtype``) and returns
        # it unchanged.
        # NOTE(review): @weighted_function here guards the *decorator* call,
        # not the decorated method — looks unintentional; confirm.
        fn._rtype = float
        return fn
    @weighted_function
    def vector_function(fn: Callable) -> Callable:
        # Tags ``fn`` as Series-returning (read via ``_rtype``) and returns
        # it unchanged.
        # NOTE(review): @weighted_function here guards the *decorator* call,
        # not the decorated method — looks unintentional; confirm.
        fn._rtype = pd.Series
        return fn
def set_weights(self, weights: np.array) -> None:
"""Sets the weight values.
:param weights: Array of weights.
:type weights: np.array.
"""
if weights is None:
self.weights = pd.Series(np.ones_like(self.values), dtype=float)
else:
self.weights = pd.Series(weights, dtype=float)
@vector_function
def weight(self) -> pd.Series:
"""Calculates the weighted value of the MicroSeries.
:returns: A Series multiplying the MicroSeries by its weight.
:rtype: pd.Series
"""
return self.multiply(self.weights)
@scalar_function
def sum(self) -> float:
"""Calculates the weighted sum of the MicroSeries.
:returns: The weighted sum.
:rtype: float
"""
return self.multiply(self.weights).sum()
    @scalar_function
    def count(self) -> float:
        """Calculates the weighted count of the MicroSeries.

        :returns: The weighted count — the sum of the weights, not the
            number of rows.
        :rtype: float
        """
        return self.weights.sum()
@scalar_function
def mean(self) -> float:
"""Calculates the weighted mean of the MicroSeries
:returns: The weighted mean.
:rtype: float
"""
return np.average(self.values, weights=self.weights)
def quantile(self, q: np.array) -> pd.Series:
"""Calculates weighted quantiles of the MicroSeries.
Doesn't exactly match unweighted quantiles of stacked values.
See stackoverflow.com/q/21844024#comment102342137_29677616.
:param q: Array of quantiles to calculate.
:type q: np.array
:return: Array of weighted quantiles.
:rtype: pd.Series
"""
values = np.array(self.values)
quantiles = np.array(q)
sample_weight = np.array(self.weights)
assert np.all(quantiles >= 0) and np.all(
quantiles <= 1
), "quantiles should be in [0, 1]"
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
weighted_quantiles /= np.sum(sample_weight)
result = np.interp(quantiles, weighted_quantiles, values)
if quantiles.shape == ():
return result
return pd.Series(result, index=quantiles)
@scalar_function
def median(self) -> float:
"""Calculates the weighted median of the MicroSeries.
:returns: The weighted median of a DataFrame's column.
:rtype: float
"""
return self.quantile(0.5)
@scalar_function
def gini(self, negatives: str = None) -> float:
"""Calculates Gini index.
:param negatives: An optional string indicating how to treat negative
values of x:
'zero' replaces negative values with zeroes.
'shift' subtracts the minimum value from all values of x,
when this minimum is negative. That is, it adds the absolute
minimum value.
Defaults to None, which leaves negative values as they are.
:type q: str
:returns: Gini index.
:rtype: float
"""
x = np.array(self).astype("float")
if negatives == "zero":
x[x < 0] = 0
if negatives == "shift" and np.amin(x) < 0:
x -= np.amin(x)
if (self.weights != np.ones(len(self))).any(): # Varying weights.
sorted_indices = np.argsort(self)
sorted_x = np.array(self[sorted_indices])
sorted_w = np.array(self.weights[sorted_indices])
cumw = np.cumsum(sorted_w)
cumxw = np.cumsum(sorted_x * sorted_w)
return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
cumxw[-1] * cumw[-1]
)
else:
sorted_x = np.sort(self)
n = len(x)
cumxw = np.cumsum(sorted_x)
# The above formula, with all weights equal to 1 simplifies to:
return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n
@scalar_function
def top_x_pct_share(self, top_x_pct: float) -> float:
"""Calculates top x% share.
:param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1,
0.001.
:type top_x_pct: float
:returns: The weighted share held by the top x%.
:rtype: float
"""
threshold = self.quantile(1 - top_x_pct)
top_x_pct_sum = self[self >= threshold].sum()
total_sum = self.sum()
return top_x_pct_sum / total_sum
@scalar_function
def bottom_x_pct_share(self, bottom_x_pct) -> float:
"""Calculates bottom x% share.
:param bottom_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1,
0.001.
:type bottom_x_pct: float
:returns: The weighted share held by the bottom x%.
:rtype: float
"""
return 1 - self.top_x_pct_share(1 - bottom_x_pct)
@scalar_function
def bottom_50_pct_share(self) -> float:
"""Calculates bottom 50% share.
:returns: The weighted share held by the bottom 50%.
:rtype: float
"""
return self.bottom_x_pct_share(0.5)
@scalar_function
def top_50_pct_share(self) -> float:
"""Calculates top 50% share.
:returns: The weighted share held by the top 50%.
:rtype: float
"""
return self.top_x_pct_share(0.5)
@scalar_function
def top_10_pct_share(self) -> float:
"""Calculates top 10% share.
:returns: The weighted share held by the top 10%.
:rtype: float
"""
return self.top_x_pct_share(0.1)
@scalar_function
def top_1_pct_share(self) -> float:
"""Calculates top 1% share.
:returns: The weighted share held by the top 50%.
:rtype: float
"""
return self.top_x_pct_share(0.01)
@scalar_function
def top_0_1_pct_share(self) -> float:
"""Calculates top 0.1% share.
:returns: The weighted share held by the top 0.1%.
:rtype: float
"""
return self.top_x_pct_share(0.001)
@scalar_function
def t10_b50(self) -> float:
"""Calculates ratio between the top 10% and bottom 50% shares.
:returns: The weighted share held by the top 10% divided by
the weighted share held by the bottom 50%.
"""
t10 = self.top_10_pct_share()
b50 = self.bottom_50_pct_share()
return t10 / b50
@vector_function
def cumsum(self) -> pd.Series:
return pd.Series(self * self.weights).cumsum()
@vector_function
def rank(self, pct=False) -> pd.Series:
order = np.argsort(self.values)
inverse_order = np.argsort(order)
ranks = np.array(self.weights.values)[order].cumsum()[inverse_order]
if pct:
ranks /= self.weights.values.sum()
return pd.Series(ranks, index=self.index)
@vector_function
def decile_rank(self):
return MicroSeries(np.ceil(self.rank(pct=True) * 10))
@vector_function
def quintile_rank(self):
return MicroSeries(np.ceil(self.rank(pct=True) * 5))
@vector_function
def quartile_rank(self):
return MicroSeries(np.ceil(self.rank(pct=True) * 4))
@vector_function
def percentile_rank(self):
return MicroSeries(np.ceil(self.rank(pct=True) * 100))
def groupby(self, *args, **kwargs):
gb = super().groupby(*args, **kwargs)
gb.__class__ = MicroSeriesGroupBy
gb._init()
gb.weights = pd.Series(self.weights).groupby(*args, **kwargs)
return gb
def copy(self, deep=True):
res = super().copy(deep)
res = MicroSeries(res, weights=self.weights.copy(deep))
return res
def equals(self, other) -> bool:
equal_values = super().equals(other)
equal_weights = self.weights.equals(other.weights)
return equal_values and equal_weights
def __getitem__(self, key):
result = super().__getitem__(key)
if isinstance(result, pd.Series):
weights = self.weights.__getitem__(key)
return MicroSeries(result, weights=weights)
return result
def __getattr__(self, name):
return MicroSeries(super().__getattr__(name), weights=self.weights)
# operators
def __add__(self, other):
return MicroSeries(super().__add__(other), weights=self.weights)
def __sub__(self, other):
return MicroSeries(super().__sub__(other), weights=self.weights)
def __mul__(self, other):
return MicroSeries(super().__mul__(other), weights=self.weights)
def __floordiv__(self, other):
return MicroSeries(super().__floordiv__(other), weights=self.weights)
def __truediv__(self, other):
return MicroSeries(super().__truediv__(other), weights=self.weights)
def __mod__(self, other):
return MicroSeries(super().__mod__(other), weights=self.weights)
def __pow__(self, other):
return MicroSeries(super().__pow__(other), weights=self.weights)
# comparators
def __lt__(self, other):
return MicroSeries(super().__lt__(other), weights=self.weights)
def __le__(self, other):
return MicroSeries(super().__le__(other), weights=self.weights)
def __eq__(self, other):
return MicroSeries(super().__eq__(other), weights=self.weights)
def __ne__(self, other):
return MicroSeries(super().__ne__(other), weights=self.weights)
def __ge__(self, other):
return MicroSeries(super().__ge__(other), weights=self.weights)
def __gt__(self, other):
return MicroSeries(super().__gt__(other), weights=self.weights)
# assignment operators
def __iadd__(self, other):
return MicroSeries(super().__iadd__(other), weights=self.weights)
def __isub__(self, other):
return MicroSeries(super().__isub__(other), weights=self.weights)
def __imul__(self, other):
return MicroSeries(super().__imul__(other), weights=self.weights)
def __ifloordiv__(self, other):
return MicroSeries(super().__ifloordiv__(other), weights=self.weights)
def __idiv__(self, other):
return MicroSeries(super().__idiv__(other), weights=self.weights)
def __itruediv__(self, other):
return MicroSeries(super().__itruediv__(other), weights=self.weights)
def __imod__(self, other):
return MicroSeries(super().__imod__(other), weights=self.weights)
def __ipow__(self, other):
return MicroSeries(super().__ipow__(other), weights=self.weights)
# other
def __neg__(self, other):
return MicroSeries(super().__neg__(other), weights=self.weights)
def __pos__(self, other):
return MicroSeries(super().__pos__(other), weights=self.weights)
def __repr__(self):
return pd.DataFrame(
dict(value=self.values, weight=self.weights.values)
).__repr__()
# Catalogue MicroSeries methods by their declared return type (`_rtype` tag,
# set by the scalar_function/vector_function decorators) so that groupby
# wrappers can dispatch scalar vs. vector aggregations appropriately.
def _functions_returning(rtype):
    """Return the names of MicroSeries attributes tagged with _rtype == rtype."""
    names = []
    for attr in dir(MicroSeries):
        member = getattr(MicroSeries, attr)
        if getattr(member, "_rtype", None) == rtype:
            names.append(attr)
    return names


MicroSeries.SCALAR_FUNCTIONS = _functions_returning(float)
MicroSeries.VECTOR_FUNCTIONS = _functions_returning(pd.Series)
# quantile returns a scalar or a Series depending on its argument
MicroSeries.AGNOSTIC_FUNCTIONS = ["quantile"]
MicroSeries.FUNCTIONS = (
    MicroSeries.SCALAR_FUNCTIONS
    + MicroSeries.VECTOR_FUNCTIONS
    + MicroSeries.AGNOSTIC_FUNCTIONS
)
class MicroSeriesGroupBy(pd.core.groupby.generic.SeriesGroupBy):
    def _init(self):
        # Attach a weighted version of every MicroSeries aggregation to this
        # groupby object. Called by MicroSeries.groupby right after the
        # re-classing; `self.weights` (a SeriesGroupBy over the parent's
        # weights) is assigned by the caller immediately afterwards.
        def _weighted_agg(name) -> Callable:
            def via_micro_series(row, *args, **kwargs):
                # Rebuild a per-group MicroSeries from the packed arrays and
                # invoke the named weighted method on it.
                return getattr(MicroSeries(row.a, weights=row.w), name)(
                    *args, **kwargs
                )

            fn = getattr(MicroSeries, name)

            @wraps(fn)
            def _weighted_agg_fn(*args, **kwargs):
                # One row per group: column 'a' holds the value array,
                # column 'w' the matching weight array.
                arrays = self.apply(np.array)
                weights = self.weights.apply(np.array)
                df = pd.DataFrame(dict(a=arrays, w=weights))
                is_array = len(args) > 0 and hasattr(args[0], "__len__")
                # NOTE: 'and' binds tighter than 'or' below, so an agnostic
                # function (quantile) with an array argument falls through to
                # the vector branch — appears intentional.
                if (
                    name in MicroSeries.SCALAR_FUNCTIONS
                    or name in MicroSeries.AGNOSTIC_FUNCTIONS
                    and not is_array
                ):
                    result = df.agg(
                        lambda row: via_micro_series(row, *args, **kwargs),
                        axis=1,
                    )
                elif (
                    name in MicroSeries.VECTOR_FUNCTIONS
                    or name in MicroSeries.AGNOSTIC_FUNCTIONS
                    and is_array
                ):
                    result = df.apply(
                        lambda row: via_micro_series(row, *args, **kwargs),
                        axis=1,
                    )
                    # one Series per group -> flatten into a single
                    # (group, position)-indexed Series
                    return result.stack()
                return result

            return _weighted_agg_fn

        for fn_name in MicroSeries.FUNCTIONS:
            setattr(self, fn_name, _weighted_agg(fn_name))
class MicroDataFrameGroupBy(pd.core.groupby.generic.DataFrameGroupBy):
    def _init(self, by: Union[str, list]):
        """Attach weighted aggregations for every remaining data column.

        :param by: Column name (or list of names) the frame was grouped by;
            these and the internal '__tmp_weights' column are excluded from
            the aggregated output.
        :type by: Union[str, list]
        """
        self.columns = list(self.obj.columns)
        # drop the grouping key(s) and the temporary weights column
        if isinstance(by, list):
            for column in by:
                self.columns.remove(column)
        elif isinstance(by, str):
            self.columns.remove(by)
        self.columns.remove("__tmp_weights")

        def get_fn(name):
            # binds `name` now, avoiding the late-binding closure pitfall
            def fn(*args, **kwargs):
                # apply the named weighted aggregation column by column
                return MicroDataFrame(
                    {
                        col: getattr(getattr(self, col), name)(
                            *args, **kwargs
                        )
                        for col in self.columns
                    }
                )

            return fn

        # Scalar and vector aggregations share the identical per-column
        # dispatch (the original duplicated this loop verbatim for each list).
        for fn_name in (
            MicroSeries.SCALAR_FUNCTIONS + MicroSeries.VECTOR_FUNCTIONS
        ):
            setattr(self, fn_name, get_fn(fn_name))
class MicroDataFrame(pd.DataFrame):
def | |
xmin - value of x coodinate on the left side of frame
xmax - value of x coordinate on right side of frame
plot_type = - can be either 'strat' (for stratigraphic plot) or 'morph' (for morphologic plot)
filename - first few characters of the output filenames
dirname - name of directory where output files should be written
pb_age - age of point bars (in years) at which they get covered by vegetation (if the 'morph' option is used for 'plot_type')
ob_age - age of oxbow lakes (in years) at which they get covered by vegetation (if the 'morph' option is used for 'plot_type')
scale - scaling factor (e.g., 2) that determines how many times larger you want the frame to be, compared to the default scaling of the figure
"""
sclt = np.array(self.cl_times)
if len(end_time)>0:
sclt = sclt[sclt<=end_time]
channels = self.channels[:len(sclt)]
ymax = 0
for i in range(len(channels)):
ymax = max(ymax, np.max(np.abs(channels[i].y)))
ymax = ymax+2*channels[0].W # add a bit of space on top and bottom
ymin = -1*ymax
for i in range(0,len(sclt)):
fig = self.plot(plot_type,pb_age,ob_age,sclt[i])
fig_height = scale*fig.get_figheight()
fig_width = (xmax-xmin)*fig_height/(ymax-ymin)
fig.set_figwidth(fig_width)
fig.set_figheight(fig_height)
fig.gca().set_xlim(xmin,xmax)
fig.gca().set_xticks([])
fig.gca().set_yticks([])
plt.plot([xmin+200, xmin+200+5000],[ymin+200, ymin+200], 'k', linewidth=2)
plt.text(xmin+200+2000, ymin+200+100, '5 km', fontsize=14)
fname = dirname+filename+'%03d.png'%(i)
fig.savefig(fname, bbox_inches='tight')
plt.close()
def build_3d_model(self,model_type,h_mud,levee_width,h,w,bth,dcr,dx,delta_s,starttime,endtime,xmin,xmax,ymin,ymax):
    """method for building 3D model from set of centerlines (that are part of a ChannelBelt object)
    Inputs:
    model_type - model type ('fluvial' or 'submarine')
    h_mud - maximum thickness of overbank mud
    levee_width - width of overbank mud
    h - channel depth
    w - channel width
    bth - thickness of channel sand (only used in submarine models)
    dcr - critical channel depth where sand thickness goes to zero (only used in submarine models)
    dx - cell size in x and y directions
    delta_s - sampling distance alogn centerlines
    starttime - age of centerline that will be used as the first centerline in the model
    endtime - age of centerline that will be used as the last centerline in the model
    xmin,xmax,ymin,ymax - x and y coordinates that define the model domain; if xmin is set to zero,
    a plot of the centerlines is generated and the model domain has to be defined by clicking its upper
    left and lower right corners
    Returns: a ChannelBelt3D object
    """
    # select the centerlines whose ages fall within [starttime, endtime]
    sclt = np.array(self.cl_times)
    ind1 = np.where(sclt>=starttime)[0][0]
    ind2 = np.where(sclt<=endtime)[0][-1]
    sclt = sclt[ind1:ind2+1]
    channels = self.channels[ind1:ind2+1]
    # select the cutoffs that occurred inside the same time window (if any)
    cot = np.array(self.cutoff_times)
    if (len(cot)>0) & (len(np.where(cot>=starttime)[0])>0) & (len(np.where(cot<=endtime)[0])>0):
        cfind1 = np.where(cot>=starttime)[0][0]
        cfind2 = np.where(cot<=endtime)[0][-1]
        cot = cot[cfind1:cfind2+1]
        cutoffs = self.cutoffs[cfind1:cfind2+1]
    else:
        cot = []
        cutoffs = []
    n_steps = len(sclt) # number of events
    if xmin == 0: # plot centerlines and define model domain
        plt.figure(figsize=(15,4))
        maxX, minY, maxY = 0, 0, 0
        for i in range(n_steps): # plot centerlines
            plt.plot(channels[i].x,channels[i].y,'k')
            maxX = max(maxX,np.max(channels[i].x))
            maxY = max(maxY,np.max(channels[i].y))
            minY = min(minY,np.min(channels[i].y))
        plt.axis([0,maxX,minY-10*w,maxY+10*w])
        plt.gca().set_aspect('equal', adjustable='box')
        plt.tight_layout()
        # interactive domain definition: user clicks two opposite corners
        pts = np.zeros((2,2))
        for i in range(0,2):
            pt = np.asarray(plt.ginput(1))
            pts[i,:] = pt
            plt.scatter(pt[0][0],pt[0][1])
        plt.plot([pts[0,0],pts[1,0],pts[1,0],pts[0,0],pts[0,0]],[pts[0,1],pts[0,1],pts[1,1],pts[1,1],pts[0,1]],'r')
        xmin = min(pts[0,0],pts[1,0])
        xmax = max(pts[0,0],pts[1,0])
        ymin = min(pts[0,1],pts[1,1])
        ymax = max(pts[0,1],pts[1,1])
    iwidth = int((xmax-xmin)/dx)
    iheight = int((ymax-ymin)/dx)
    # four surfaces are stored per event: erosion, oxbow-mud top, sand top, levee top
    topo = np.zeros((iheight,iwidth,4*n_steps)) # array for storing topographic surfaces
    facies = np.zeros((4*n_steps,1))
    # create initial topography:
    x1 = np.linspace(0,iwidth-1,iwidth)
    y1 = np.linspace(0,iheight-1,iheight)
    xv, yv = np.meshgrid(x1,y1)
    z1 = channels[0].z
    z1 = z1[(channels[0].x>xmin) & (channels[0].x<xmax)]
    topoinit = z1[0] - ((z1[0]-z1[-1])/(xmax-xmin))*xv*dx # initial (sloped) topography
    topo[:,:,0] = topoinit.copy()
    surf = topoinit.copy()
    # np.NaN marks surfaces with no facies (requires NumPy < 2.0)
    facies[0] = np.NaN
    # generate surfaces:
    channels3D = []
    x_pixs = []
    y_pixs = []
    for i in range(n_steps):
        update_progress(i/n_steps)
        x = channels[i].x
        y = channels[i].y
        z = channels[i].z
        cutoff_ind = []
        # check if there were cutoffs during the last time step and collect indices in an array:
        for j in range(len(cot)):
            if (cot[j] >= sclt[i-1]) & (cot[j] < sclt[i]):
                cutoff_ind = np.append(cutoff_ind,j)
        # create distance map:
        cl_dist, x_pix, y_pix, z_pix, s_pix, z_map, x1, y1, z1 = dist_map(x,y,z,xmin,xmax,ymin,ymax,dx,delta_s)
        if i == 0:
            cl_dist_prev = cl_dist
        # erosion: the channel cuts down into the existing surface
        surf = np.minimum(surf,erosion_surface(h,w/dx,cl_dist,z_map))
        topo[:,:,4*i] = surf # erosional surface
        facies[4*i] = np.NaN
        if model_type == 'fluvial':
            # point-bar (sand) deposition up to the bar surface
            pb = point_bar_surface(cl_dist,z_map,h,w/dx)
            th = np.maximum(surf,pb)-surf
            th_oxbows = th.copy()
            # setting sand thickness to zero at cutoff locations:
            if len(cutoff_ind)>0:
                cutoff_dists = 1e10*np.ones(np.shape(th)) #initialize cutoff_dists with a large number
                for j in range(len(cutoff_ind)):
                    cutoff_dist, cfx_pix, cfy_pix = cl_dist_map(cutoffs[int(cutoff_ind[j])].x[0],cutoffs[int(cutoff_ind[j])].y[0],cutoffs[int(cutoff_ind[j])].z[0],xmin,xmax,ymin,ymax,dx)
                    cutoff_dists = np.minimum(cutoff_dists,cutoff_dist)
                th_oxbows[cutoff_dists>=0.9*w/dx] = 0 # set oxbow fill thickness to zero outside of oxbows
                th[cutoff_dists<0.9*w/dx] = 0 # set point bar thickness to zero inside of oxbows
            else: # no cutoffs
                th_oxbows = np.zeros(np.shape(th))
            th[th<0] = 0 # eliminate negative th values
            surf = surf+th_oxbows # update topographic surface with oxbow deposit thickness
            topo[:,:,4*i+1] = surf # top of oxbow mud
            facies[4*i+1] = 0
            surf = surf+th # update topographic surface with sand thickness
            topo[:,:,4*i+2] = surf # top of sand
            facies[4*i+2] = 1
            surf = surf + mud_surface(h_mud,levee_width/dx,cl_dist,w/dx,z_map,surf) # mud/levee deposition
            topo[:,:,4*i+3] = surf # top of levee
            facies[4*i+3] = 2
            channels3D.append(Channel(x1,y1,z1,w,h))
            x_pixs.append(x_pix)
            y_pixs.append(y_pix)
        if model_type == 'submarine':
            # levee/mud first; h_mud is per-event (indexed by i) in this mode
            surf = surf + mud_surface(h_mud[i],levee_width/dx,cl_dist,w/dx,z_map,surf) # mud/levee deposition
            topo[:,:,4*i+1] = surf # top of levee
            facies[4*i+1] = 2
            # sand thickness:
            th, relief = sand_surface(surf,bth,dcr,z_map,h)
            th[th<0] = 0 # eliminate negative th values
            th[cl_dist>1.0*w/dx] = 0 # eliminate sand outside of channel
            th_oxbows = th.copy()
            # setting sand thickness to zero at cutoff locations:
            if len(cutoff_ind)>0:
                cutoff_dists = 1e10*np.ones(np.shape(th)) #initialize cutoff_dists with a large number
                for j in range(len(cutoff_ind)):
                    cutoff_dist, cfx_pix, cfy_pix = cl_dist_map(cutoffs[int(cutoff_ind[j])].x[0],cutoffs[int(cutoff_ind[j])].y[0],cutoffs[int(cutoff_ind[j])].z[0],xmin,xmax,ymin,ymax,dx)
                    cutoff_dists = np.minimum(cutoff_dists,cutoff_dist)
                th_oxbows[cutoff_dists>=0.9*w/dx] = 0 # set oxbow fill thickness to zero outside of oxbows
                th[cutoff_dists<0.9*w/dx] = 0 # set point bar thickness to zero inside of oxbows
                # adding back sand near the channel axis (submarine only):
                # th[cl_dist<0.5*w/dx] = bth*(1 - relief[cl_dist<0.5*w/dx]/dcr)
            else: # no cutoffs
                th_oxbows = np.zeros(np.shape(th))
            surf = surf+th_oxbows # update topographic surface with oxbow deposit thickness
            topo[:,:,4*i+2] = surf # top of oxbow mud
            facies[4*i+2] = 0
            surf = surf+th # update topographic surface with sand thickness
            topo[:,:,4*i+3] = surf # top of sand
            facies[4*i+3] = 1
        cl_dist_prev = cl_dist.copy()
    topo = np.concatenate((np.reshape(topoinit,(iheight,iwidth,1)),topo),axis=2) # add initial topography to array
    strat = topostrat(topo) # create stratigraphic surfaces
    strat = np.delete(strat, np.arange(4*n_steps+1)[1::4], 2) # get rid of unnecessary stratigraphic surfaces (duplicates)
    facies = np.delete(facies, np.arange(4*n_steps)[::4]) # get rid of unnecessary facies layers (NaNs)
    if model_type == 'fluvial':
        facies_code = {0:'oxbow', 1:'point bar', 2:'levee'}
    if model_type == 'submarine':
        facies_code = {0:'oxbow', 1:'channel sand', 2:'levee'}
    chb_3d = ChannelBelt3D(model_type,topo,strat,facies,facies_code,dx,channels3D)
    return chb_3d, xmin, xmax, ymin, ymax
def resample_centerline(x, y, z, deltas):
    """Resample a 3D centerline so consecutive points are ~`deltas` apart.

    Fits a parametric spline through (x, y, z) with *no* smoothing and
    re-evaluates it at evenly spaced parameter values.

    :param x, y, z: coordinate arrays of the centerline nodes
    :param deltas: target spacing between resampled nodes
    :returns: resampled x, y, z plus their derivatives dx, dy, dz, ds and
        the cumulative along-line distance s
    """
    dx, dy, dz, ds, s = compute_derivatives(x, y, z)  # compute derivatives
    # parametric spline representation of curve; note that there is *no* smoothing
    tck, u = scipy.interpolate.splprep([x, y, z], s=0)
    # builtin int() instead of np.int: that alias was deprecated in NumPy 1.20
    # and removed in 1.24+, so the original line crashes on modern NumPy
    unew = np.linspace(0, 1, 1 + int(s[-1] / deltas))  # vector for resampling
    out = scipy.interpolate.splev(unew, tck)  # resampling
    x, y, z = out[0], out[1], out[2]  # assign new coordinate values
    dx, dy, dz, ds, s = compute_derivatives(x, y, z)  # recompute derivatives
    return x, y, z, dx, dy, dz, ds, s
def migrate_one_step(x, y, z, W, kl, dt, k, Cf, D, pad, pad1, omega, gamma):
    """Advance the channel centerline (x, y) by one migration step of
    duration dt, displacing interior nodes perpendicular to the local
    tangent in proportion to the computed migration rate."""
    n_nodes = len(x)
    curv = compute_curvature(x, y)
    dx, dy, dz, ds, s = compute_derivatives(x, y, z)
    curv = W * curv  # dimensionless curvature
    # nominal migration rate: simple linear function of curvature
    R0 = kl * curv
    alpha = k * 2 * Cf / D  # exponent for convolution function G
    R1 = compute_migration_rate(pad, n_nodes, ds, alpha, omega, gamma, R0)
    # calculate new centerline coordinates, leaving the padded ends fixed:
    interior = slice(pad1, n_nodes - pad + 1)
    dy_ds = dy[interior] / ds[interior]
    dx_ds = dx[interior] / ds[interior]
    # adjust x and y coordinates (this *is* the migration):
    x[interior] = x[interior] + R1[interior] * dy_ds * dt
    y[interior] = y[interior] - R1[interior] * dx_ds * dt
    return x, y
def generate_initial_channel(W,D,Sl,deltas,pad,n_bends):
"""generate straight Channel object with some noise added that can serve
as input for initializing a ChannelBelt object
W - channel width
D - channel depth
Sl - channel gradient
deltas - distance between nodes on centerline
pad - padding (number of nodepoints along centerline)
n_bends - approximate number of bends to be simulated"""
noisy_len = n_bends*10*W/2.0 # length of noisy part of initial centerline
pad1 = int(pad/10.0) # padding at upstream end can be shorter than padding on downstream end
if pad1<5:
pad1 = 5
| |
'double':
return [validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)]
elif field_type == 'integer':
return [validators.IS_INT_IN_RANGE(-1e100, 1e100)]
elif field_type == 'date':
return [validators.IS_DATE()]
elif field_type == 'time':
return [validators.IS_TIME()]
elif field_type == 'datetime':
return [validators.IS_DATETIME()]
else:
return []
def sql_represent(obj, fieldtype, dbname):
    # Convert a Python value into its literal SQL text for the given field
    # type and backend name (Python 2 code: uses `unicode` and str.decode).
    if type(obj) in (types.LambdaType, types.FunctionType):
        obj = obj()  # callables are evaluated to obtain the actual value
    if isinstance(obj, (Expression, Field)):
        return obj
    if isinstance(fieldtype, SQLCustomType):
        return fieldtype.encoder(obj)
    if obj is None:
        return 'NULL'
    # empty string maps to NULL for non-text field types
    # (prefixes: id/integer/reference/date+datetime/time/boolean)
    if obj == '' and fieldtype[:2] in ['id', 'in', 're', 'da', 'ti', 'bo']:
        return 'NULL'
    if fieldtype == 'boolean':
        # anything falsy or starting with 'F'/'f' counts as false
        if dbname == 'mssql':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        else:
            if obj and not str(obj)[0].upper() == 'F':
                return "'T'"
            else:
                return "'F'"
    if fieldtype[0] == 'i':
        return str(int(obj))
    elif fieldtype[0] == 'r': # reference
        if fieldtype.find('.')>0:
            return repr(obj)
        elif isinstance(obj, (Row, Reference)):
            return obj['id']
        return str(int(obj))
    elif fieldtype == 'double':
        return repr(float(obj))
    if isinstance(obj, unicode):
        obj = obj.encode('utf-8')
    if fieldtype == 'blob':
        obj = base64.b64encode(str(obj))
        if dbname == 'db2':
            return "BLOB('%s')" % obj
        if dbname == 'oracle':
            return ":CLOB('%s')" % obj
    elif isinstance(obj, str):
        try:
            obj.decode('utf-8')
        except:
            # not valid utf-8: assume latin1 and re-encode as utf-8
            obj = obj.decode('latin1').encode('utf8')
    # FIXME: remove comment lines?
    #elif fieldtype == 'text':
    #    if dbname == 'oracle':
    #        return ":CLOB('%s')" % obj.replace("'","?") ### FIX THIS
    elif fieldtype == 'date':
        # FIXME: remove comment lines?
        # if dbname=='postgres': return "'%s'::bytea" % obj.replace("'","''")
        if isinstance(obj, (datetime.date, datetime.datetime)):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
        if dbname in ['oracle', 'informix']:
            return "to_date('%s','yyyy-mm-dd')" % obj
    elif fieldtype == 'datetime':
        if isinstance(obj, datetime.datetime):
            if dbname == 'db2':
                return "'%s'" % obj.isoformat()[:19].replace('T','-').replace(':','.')
            else:
                obj = obj.isoformat()[:19].replace('T',' ')
        elif isinstance(obj, datetime.date):
            if dbname == 'db2':
                # NOTE(review): '%' binds tighter than '+', so '-00.00.00'
                # ends up OUTSIDE the quoted literal — looks like a
                # precedence bug; verify against the db2 adapter.
                return "'%s'" % obj.isoformat()[:10]+'-00.00.00'
            else:
                obj = obj.isoformat()[:10]+' 00:00:00'
        else:
            obj = str(obj)
        if dbname in ['oracle', 'informix']:
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
    elif fieldtype == 'time':
        if isinstance(obj, datetime.time):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif dbname == 'mssql2' and (fieldtype == 'string' or fieldtype == 'text'):
        # mssql2 uses N'...' national-character literals
        return "N'%s'" % str(obj).replace("'", "''")
    else:
        obj = str(obj)
    # default: single-quoted literal with quotes doubled for escaping
    return "'%s'" % obj.replace("'", "''")
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    # table/field names are interpolated directly into SQL, so anything
    # outside [0-9a-zA-Z_] is rejected (presumably to block SQL injection)
    if re.compile('[^0-9a-zA-Z_]').findall(text):
        raise SyntaxError, \
            'only [0-9a-zA-Z_] allowed in table and field names, received %s' \
            % text
    return text
def sqlite3_web2py_extract(lookup, s):
    """Extract a date/time component from an ISO-formatted string.

    Registered with SQLite as the 2-argument SQL function ``web2py_extract``.

    :param lookup: component name: 'year', 'month', 'day', 'hour',
        'minute' or 'second'
    :param s: an ISO string, e.g. 'YYYY-MM-DD HH:MM:SS'
    :returns: the component as an int, or None when the component name is
        unknown or the slice is not numeric
    """
    table = {
        'year': (0, 4),
        'month': (5, 7),
        'day': (8, 10),
        'hour': (11, 13),
        'minute': (14, 16),
        'second': (17, 19),
    }
    try:
        (i, j) = table[lookup]
        return int(s[i:j])
    except (KeyError, ValueError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): unknown component name (KeyError),
        # non-numeric or too-short string (ValueError), or non-sliceable
        # input such as None (TypeError).
        return None
def oracle_fix_execute(command, execute):
    """Rewrite inline ``:CLOB('...')`` markers into numbered bind values and
    run the command through `execute` with the extracted CLOB payloads."""
    args = []
    bind_index = 1
    while True:
        match = oracle_fix.match(command)
        if match is None:
            break
        # replace the matched CLOB literal with its positional bind number
        command = (
            command[:match.start('clob')]
            + str(bind_index)
            + command[match.end('clob'):]
        )
        # strip the ":CLOB('" prefix and "')" suffix, un-double the quotes
        args.append(match.group('clob')[6:-2].replace("''", "'"))
        bind_index += 1
    # drop the trailing character (statement terminator) before executing
    return execute(command[:-1], args)
def autofields(db, text):
    # Parse a compact "table: field descriptions" text into (tablename,
    # [Field, ...]).  Deliberately disabled: everything after this raise is
    # unreachable work-in-progress code.
    raise SyntaxError, "work in progress"
    m = re.compile('(?P<i>\w+)')
    (tablename, fields) = text.lower().split(':', 1)
    tablename = tablename.replace(' ', '_')
    newfields = []
    for field in fields.split(','):
        # "items by keys" selects the representation keys for references
        if field.find(' by ') >= 0:
            (items, keys) = field.split(' by ')
        else:
            (items, keys) = (field, '%(id)s')
        items = m.findall(items)
        if not items:
            break
        keys = m.sub('%(\g<i>)s', keys)
        (requires, notnull, unique) = (None, False, False)
        # trailing modifier words: 'notnull' and/or 'unique'
        if items[-1] in ['notnull']:
            (notnull, items) = (True, items[:-1])
        if items[-1] in ['unique']:
            (unique, items) = (True, items[:-1])
        # trailing type word selects the field type, default 'string'
        if items[-1] in ['text', 'date', 'datetime', 'time', 'blob', 'upload', 'password',
                         'integer', 'double', 'boolean', 'string']:
            # NOTE(review): 'item[:-1]' looks like a typo for 'items[:-1]'
            # (would raise NameError) — unreachable while the guard raise
            # above is active.
            (items, t) = (item[:-1], items[-1])
        elif items[-1] in db.tables:
            t = 'reference %s' % items[-1]
            requires = validators.IS_IN_DB(db, '%s.id' % items[-1], keys)
        else:
            t = 'string'
        name = '_'.join(items)
        if unique:
            if requires:
                raise SyntaxError, "Sorry not supported"
            requires = validators.IS_NOT_IN_DB(db, '%s.%s' % (tablename, name))
        if requires and not notnull:
            requires = validators.IS_EMPTY_OR(requires)
        label = ' '.join([i.capitalize() for i in items])
        newfields.append(db.Field(name, t, label=label, requires=requires,
                         notnull=notnull, unique=unique))
    return tablename, newfields
class Row(dict):
    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __getitem__(self, key):
        key=str(key)
        # values of ad-hoc SELECT expressions (not mapped to a field) are
        # stored under the '_extra' sub-dictionary
        if key in self.get('_extra',{}):
            return self._extra[key]
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        # keys are normalized to str on the way in
        dict.__setitem__(self, str(key), value)

    def __getattr__(self, key):
        # attribute access falls back to item access: d.a == d['a']
        return dict.__getitem__(self,key)

    def __setattr__(self, key, value):
        # attribute assignment stores an item: d.a = v == d['a'] = v
        dict.__setitem__(self,key,value)

    def __repr__(self):
        return '<Row ' + dict.__repr__(self) + '>'

    def __int__(self):
        # a Row coerces to its record id
        return dict.__getitem__(self,'id')

    def __eq__(self,other):
        # NOTE(review): assumes `other` also provides as_dict(); comparing
        # against a non-Row raises AttributeError rather than returning False
        return self.as_dict() == other.as_dict()

    def __ne__(self,other):
        return self.as_dict() != other.as_dict()

    def as_dict(self,datetime_to_str=False):
        # Convert to a plain dict: nested Rows are recursed, References
        # collapse to their int id, and values that are not simple scalars
        # (Python 2 types str/unicode/int/long/float/bool) are dropped.
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=int(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,(str,unicode,int,long,float,bool)):
                del d[k]
        return d
def Row_unpickler(data):
    """Rebuild a Row from the marshalled plain-dict payload produced by
    Row_pickler (pickle support)."""
    payload = marshal.loads(data)
    return Row(payload)
def Row_pickler(data):
    """Pickle support: reduce a Row to (Row_unpickler, marshalled dict)."""
    payload = marshal.dumps(data.as_dict())
    return Row_unpickler, (payload,)
# Register Row's pickle support via Python 2's copy_reg: Rows are reduced by
# Row_pickler to a marshalled plain dict and rebuilt by Row_unpickler.
copy_reg.pickle(Row, Row_pickler, Row_unpickler)
class SQLCallableList(list):
    # A list (of table names) that can also be *called*: calling it returns a
    # shallow copy, so e.g. db.tables() yields a safe snapshot of db.tables.
    def __call__(self):
        return copy.copy(self)
class SQLDB(dict):
"""
an instance of this class represents a database connection
Example::
db = SQLDB('sqlite://test.db')
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
# ## this allows gluon to comunite a folder for this thread
_folders = {}
_connection_pools = {}
_instances = {}
@staticmethod
def _set_thread_folder(folder):
    """Remember `folder` as the working folder for the current thread.

    The shared _folders map is guarded by the module lock; try/finally
    guarantees the lock is released even if the assignment fails, so no
    other thread can be left deadlocked on sql_locker.
    """
    sql_locker.acquire()
    try:
        SQLDB._folders[thread.get_ident()] = folder
    finally:
        sql_locker.release()
# ## this allows gluon to commit/rollback all dbs in this thread
@staticmethod
def close_all_instances(action):
    """ to close cleanly databases in a multithreaded environment """
    sql_locker.acquire()
    pid = thread.get_ident()
    if pid in SQLDB._folders:
        del SQLDB._folders[pid]
    if pid in SQLDB._instances:
        instances = SQLDB._instances[pid]
        while instances:
            instance = instances.pop()
            # release the module lock while running the caller-supplied
            # action (commit/rollback) so it cannot deadlock on sql_locker
            sql_locker.release()
            action(instance)
            sql_locker.acquire()
            # ## if you want pools, recycle this connection
            really = True
            if instance._pool_size:
                pool = SQLDB._connection_pools[instance._uri]
                if len(pool) < instance._pool_size:
                    # pool not full: return the connection instead of closing
                    pool.append(instance._connection)
                    really = False
            if really:
                # close outside the lock for the same deadlock reason as above
                sql_locker.release()
                instance._connection.close()
                sql_locker.acquire()
        del SQLDB._instances[pid]
    sql_locker.release()
    return
@staticmethod
def distributed_transaction_begin(*instances):
    # Begin a distributed (XA) transaction across several SQLDB instances.
    # Only mysql needs an explicit 'XA START'; postgres begins implicitly.
    if not instances:
        return
    instances = enumerate(instances)
    for (i, db) in instances:
        if db._dbname == 'mysql':
            db._execute('XA START;')
        elif db._dbname == 'postgres':
            pass
        else:
            # NOTE(review): message omits mysql even though it is handled above
            raise SyntaxError, \
                'distributed transaction only supported by postgresql'
@staticmethod
def distributed_transaction_commit(*instances):
    """Two-phase commit across several SQLDB instances.

    PREPAREs every backend, then COMMITs all of them; on any failure during
    the prepare phase every backend is rolled back.

    Bug fix: enumerate() returns a one-shot iterator, and the original code
    exhausted it building `keys`, so the validation, prepare and commit
    loops all silently iterated over nothing.  Materializing it into a list
    lets every pass see the instances.
    """
    if not instances:
        return
    instances = list(enumerate(instances))
    thread_key = '%s.%i' % (socket.gethostname(), thread.get_ident())
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._dbname in ['postgres', 'mysql', 'firebird']:
            # call-form raise: valid in both Python 2 and 3
            raise SyntaxError(
                'distributed transaction only supported by postgresql, firebir')
    try:
        # phase 1: prepare every participant
        for (i, db) in instances:
            if db._dbname == 'postgres':
                db._execute("PREPARE TRANSACTION '%s';" % keys[i])
            elif db._dbname == 'mysql':
                db._execute("XA END;")
                db._execute("XA PREPARE;")
            elif db._dbname == 'firebird':
                db.prepare()
    except:
        # any prepare failure: roll everything back (deliberately broad
        # catch so no participant is left prepared)
        for (i, db) in instances:
            if db._dbname == 'postgres':
                db._execute("ROLLBACK PREPARED '%s';" % keys[i])
            elif db._dbname == 'mysql':
                db._execute("XA ROLLBACK;")
            elif db._dbname == 'firebird':
                db.rollback()
        raise Exception('failure to commit distributed transaction')
    else:
        # phase 2: commit every participant
        for (i, db) in instances:
            if db._dbname == 'postgres':
                db._execute("COMMIT PREPARED '%s';" % keys[i])
            elif db._dbname == 'mysql':
                db._execute("XA COMMIT;")
            elif db._dbname == 'firebird':
                db.commit()
    return
def _pool_connection(self, f):
    # Obtain a DB connection: reuse one from the per-URI pool when pooling
    # is enabled and a spare exists, otherwise create a new one via `f`.
    # ## deal with particular case first:
    if not self._pool_size:
        self._connection = f()
        return
    uri = self._uri
    sql_locker.acquire()
    if not uri in self._connection_pools:
        self._connection_pools[uri] = []
    if self._connection_pools[uri]:
        self._connection = self._connection_pools[uri].pop()
        sql_locker.release()
    else:
        # release before connecting: f() may be slow and must not hold the lock
        sql_locker.release()
        self._connection = f()
def __init__(self, uri='sqlite://dummy.db', pool_size=0, folder=None):
self._uri = str(uri) # NOTE: assuming it is in utf8!!!
self._pool_size = pool_size
self['_lastsql'] = ''
self.tables = SQLCallableList()
pid = thread.get_ident()
# Check if there is a folder for this thread else use ''
if folder:
self._folder = folder
else:
sql_locker.acquire()
if pid in self._folders:
self._folder = self._folders[pid]
else:
self._folder = self._folders[pid] = ''
sql_locker.release()
# Creating the folder if it does not exist
if self._folder:
if not os.path.exists(self._folder):
os.mkdir(self._folder)
# Now connect to database
if self._uri[:14] == 'sqlite:memory:':
self._dbname = 'sqlite'
self._pool_connection(lambda: \
sqlite3.Connection(':memory:',
check_same_thread=False))
self._connection.create_function('web2py_extract', 2,
sqlite3_web2py_extract)
# self._connection.row_factory = sqlite3.Row
self._cursor = self._connection.cursor()
self._execute = lambda *a, **b: self._cursor.execute(*a, **b)
elif not is_jdbc and self._uri[:9] == 'sqlite://':
self._dbname = 'sqlite'
path_encoding = sys.getfilesystemencoding() or \
locale.getdefaultlocale()[1]
if uri[9] != '/':
dbpath = os.path.join(
self._folder.decode(path_encoding).encode('utf8'),
uri[9:])
else:
dbpath = uri[9:]
self._pool_connection(lambda : sqlite3.Connection(dbpath,
check_same_thread=False))
self._connection.create_function('web2py_extract', 2,
sqlite3_web2py_extract)
# self._connection.row_factory = sqlite3.Row
self._cursor = self._connection.cursor()
self._execute = lambda *a, **b: self._cursor.execute(*a, **b)
elif self._uri[:8] == 'mysql://':
self._dbname = 'mysql'
m = re.compile('^(?P<user>[^:@]+)(\:(?P<passwd>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$'
).match(self._uri[8:])
if not m:
raise SyntaxError, \
"Invalid URI string in SQLDB: %s" % self._uri
user = m.group('user')
if not user:
raise SyntaxError, 'User required'
passwd = m.group('passwd')
if not passwd:
passwd = ''
host = m.group('host')
if not host:
raise SyntaxError, 'Host name required'
db = m.group('db')
if not db:
| |
include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
    """Wrap the raw block ``values`` selected by ``key`` in a pandas object.

    A 2-D block is boxed as a full frame (transposed so rows align with
    ``self.index``); a 1-D block is boxed as a single column.
    """
    selected = self.columns[self.columns.get_loc(key)]
    if values.ndim != 2:
        return self._box_col_values(values, selected)
    return self._constructor(values.T, columns=selected, index=self.index)
def _box_col_values(self, values, items):
    """Box a single column's raw values in the sliced-frame result type."""
    result_klass = _get_sliced_frame_result_type(values, self)
    return result_klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
    """Dispatch ``self[key] = value`` to the appropriate setter."""
    key = com._apply_if_callable(key, self)

    # A key that slices rows (e.g. a datetime-like string) wins first.
    row_slicer = convert_to_index_sliceable(self, key)
    if row_slicer is not None:
        return self._setitem_slice(row_slicer, value)

    if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
        # boolean-frame mask, e.g. df[df > 0] = val
        self._setitem_frame(key, value)
    elif isinstance(key, (Series, np.ndarray, list, Index)):
        # list-like of column labels, or a boolean row mask
        self._setitem_array(key, value)
    else:
        # plain single-column assignment
        self._set_item(key, value)
def _setitem_slice(self, key, value):
    # Row-slice assignment: guard against chained-assignment writes on a
    # copy first, then delegate to .loc's indexer machinery.
    self._check_setitem_copy()
    self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
    """Assign ``value`` under a list-like ``key``: either a boolean row
    mask or a collection of column labels."""
    if com.is_bool_indexer(key):
        # Boolean row mask: must match the index length exactly.
        # (also raises if key is an object array containing NA values)
        if len(key) != len(self.index):
            raise ValueError('Item wrong length %d instead of %d!' %
                             (len(key), len(self.index)))
        key = check_bool_indexer(self.index, key)
        positions = key.nonzero()[0]
        self._check_setitem_copy()
        self.loc._setitem_with_indexer(positions, value)
        return

    if isinstance(value, DataFrame):
        # Pair each target label with the corresponding source column.
        if len(value.columns) != len(key):
            raise ValueError('Columns must be same length as key')
        for target_col, source_col in zip(key, value.columns):
            self[target_col] = value[source_col]
    else:
        col_indexer = self.loc._convert_to_indexer(key, axis=1)
        self._check_setitem_copy()
        self.loc._setitem_with_indexer((slice(None), col_indexer), value)
def _setitem_frame(self, key, value):
    # support boolean setting with DataFrame input, e.g.
    # df[df > df2] = 0
    if isinstance(key, np.ndarray):
        if key.shape != self.shape:
            raise ValueError(
                'Array conditional must be same shape as self'
            )
        # promote a raw 2-d ndarray mask to a frame aligned on self's axes
        key = self._constructor(key, **self._construct_axes_dict())

    # an all-empty mask is allowed; otherwise every element must be boolean
    if key.values.size and not is_bool_dtype(key.values):
        raise TypeError(
            'Must pass DataFrame or 2-d ndarray with boolean values only'
        )

    self._check_inplace_setting(value)
    self._check_setitem_copy()
    # write value where the mask is True, keep existing values elsewhere
    # NOTE(review): ``-key`` is boolean negation of a frame of bools here;
    # newer pandas spells this ``~key``.
    self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
    """
    Add series to DataFrame in specified column.

    If series is a numpy-array (not a Series/TimeSeries), it must be the
    same length as the DataFrames index or an error will be thrown.

    Series/TimeSeries will be conformed to the DataFrames index to
    ensure homogeneity.
    """
    # derive an index from the value if the frame doesn't have one yet
    self._ensure_valid_index(value)
    value = self._sanitize_column(key, value)
    NDFrame._set_item(self, key, value)

    # check if we are modifying a copy
    # try to set first as we want an invalid
    # value exception to occur first
    if len(self):
        self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
    """
    Insert column into DataFrame at specified location.

    Raises a ValueError if `column` is already contained in the DataFrame,
    unless `allow_duplicates` is set to True.

    Parameters
    ----------
    loc : int
        Insertion index. Must verify 0 <= loc <= len(columns)
    column : string, number, or hashable object
        label of the inserted column
    value : int, Series, or array-like
        data for the new column
    allow_duplicates : bool, optional
        if True, allow inserting a label that already exists
    """
    self._ensure_valid_index(value)
    # broadcast=False: value is inserted once, never tiled across
    # duplicate labels
    value = self._sanitize_column(column, value, broadcast=False)
    self._data.insert(loc, column, value,
                      allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
    """
    Assign new columns to a DataFrame, returning a new object
    (a copy) with all the original columns in addition to the new ones.

    Parameters
    ----------
    kwargs : keyword, value pairs
        keywords are the column names.  Callable values are computed on
        the DataFrame and assigned to the new columns; the callable must
        not change the input DataFrame (though pandas doesn't check it).
        Non-callable values (e.g. a Series, scalar, or array) are simply
        assigned.

    Returns
    -------
    df : DataFrame
        A new DataFrame with the new columns in addition to
        all the existing columns.

    Notes
    -----
    Assigning multiple columns within the same ``assign`` is possible.
    For Python 3.6 and above, later items in ``**kwargs`` may refer to
    newly created or modified columns in 'df'; items are computed and
    assigned into 'df' in order.  For Python 3.5 and below, the order of
    keyword arguments is not specified: all items are computed first and
    then assigned in alphabetical order.

    .. versionchanged :: 0.23.0
        Keyword argument order is maintained for Python 3.6 and later.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3]})
    >>> df.assign(B=df.A, C=lambda x: x['A'] + x['B'])
       A  B  C
    0  1  1  2
    1  2  2  4
    2  3  3  6
    """
    result = self.copy()

    if PY36:
        # kwargs order is defined: compute-and-assign one at a time so
        # later columns can refer to earlier ones
        for name, expr in kwargs.items():
            result[name] = com._apply_if_callable(expr, result)
        return result

    # <= 3.5: evaluate everything against the untouched copy first ...
    computed = OrderedDict()
    for name, expr in kwargs.items():
        computed[name] = com._apply_if_callable(expr, result)
    # ... then assign in deterministic (alphabetical) order
    for name in sorted(computed):
        result[name] = computed[name]
    return result
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, | |
# class sqlalchemy_utils.types.json.JSONType(*args, **kwargs)[source]
# JSONType offers way of saving JSON data structures to database. On PostgreSQL the underlying implementation of this data type is ‘json’ while on other databases its simply ‘text’.
# from sqlalchemy_utils import JSONType
# class Product(Base):
# __tablename__ = 'product'
# id = sa.Column(sa.Integer, autoincrement=True)
# name = sa.Column(sa.Unicode(50))
# details = sa.Column(JSONType)
# product = Product()
# product.details = {
# 'color': 'red',
# 'type': 'car',
# 'max-speed': '400 mph'
# }
# session.commit()
# LocaleType
# classsqlalchemy_utils.types.locale.LocaleType[source]
# LocaleType saves Babel Locale objects into database. The Locale objects are converted to string on the way in and back to object on the way out.
# In order to use LocaleType you need to install Babel first.
# from sqlalchemy_utils import LocaleType
# from babel import Locale
# class User(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, autoincrement=True)
# name = sa.Column(sa.Unicode(50))
# locale = sa.Column(LocaleType)
# user = User()
# user.locale = Locale('en_US')
# session.add(user)
# session.commit()
# Like many other types this type also supports scalar coercion:
# user.locale = 'de_DE'
# user.locale # Locale('de', territory='DE')
# LtreeType
# classsqlalchemy_utils.types.ltree.LtreeType[source]
# Postgresql LtreeType type.
# The LtreeType datatype can be used for representing labels of data stored in hierarchial tree-like structure. For more detailed information please refer to http://www.postgresql.org/docs/current/static/ltree.html
# from sqlalchemy_utils import LtreeType
# class DocumentSection(Base):
# __tablename__ = 'document_section'
# id = sa.Column(sa.Integer, autoincrement=True)
# path = sa.Column(LtreeType)
# section = DocumentSection(name='Countries.Finland')
# session.add(section)
# session.commit()
# section.path # Ltree('Countries.Finland')
# Note
# Using LtreeType, LQUERY and LTXTQUERY types may require installation of Postgresql ltree extension on the server side. Please visit http://www.postgres.org for details.
# classsqlalchemy_utils.primitives.ltree.Ltree(path_or_ltree)[source]
# Ltree class wraps a valid string label path. It provides various convenience properties and methods.
# from sqlalchemy_utils import Ltree
# Ltree('1.2.3').path # '1.2.3'
# Ltree always validates the given path.
# Ltree(None) # raises TypeError
# Ltree('..') # raises ValueError
# Validator is also available as class method.
# Ltree.validate('1.2.3')
# Ltree.validate(None) # raises ValueError
# Ltree supports equality operators.
# Ltree('Countries.Finland') == Ltree('Countries.Finland')
# Ltree('Countries.Germany') != Ltree('Countries.Finland')
# Ltree objects are hashable.
# assert hash(Ltree('Finland')) == hash('Finland')
# Ltree objects have length.
# assert len(Ltree('1.2')) == 2
# assert len(Ltree('some.one.some.where')) == 4
# You can easily find subpath indexes.
# assert Ltree('1.2.3').index('2.3') == 1
# assert Ltree('1.2.3.4.5').index('3.4') == 2
# Ltree objects can be sliced.
# assert Ltree('1.2.3')[0:2] == Ltree('1.2')
# assert Ltree('1.2.3')[1:] == Ltree('2.3')
# Finding longest common ancestor.
# assert Ltree('1.2.3.4.5').lca('1.2.3', '1.2.3.4', '1.2.3') == '1.2'
# assert Ltree('1.2.3.4.5').lca('1.2', '1.2.3') == '1'
# Ltree objects can be concatenated.
# assert Ltree('1.2') + Ltree('1.2') == Ltree('1.2.1.2')
# IPAddressType
# classsqlalchemy_utils.types.ip_address.IPAddressType(max_length=50, *args, **kwargs)[source]
# Changes IPAddress objects to a string representation on the way in and changes them back to IPAddress objects on the way out.
# IPAddressType uses ipaddress package on Python >= 3 and ipaddr package on Python 2. In order to use IPAddressType with python you need to install ipaddr first.
# from sqlalchemy_utils import IPAddressType
# class User(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, autoincrement=True)
# name = sa.Column(sa.Unicode(255))
# ip_address = sa.Column(IPAddressType)
# user = User()
# user.ip_address = '172.16.58.3'
# session.add(user)
# session.commit()
# user.ip_address # IPAddress object
# PasswordType
# classsqlalchemy_utils.types.password.PasswordType(max_length=None, **kwargs)[source]
# PasswordType hashes passwords as they come into the database and allows verifying them using a Pythonic interface. This Pythonic interface relies on setting up automatic data type coercion using the force_auto_coercion() function.
# All keyword arguments (aside from max_length) are forwarded to the construction of a passlib.context.LazyCryptContext object, which also supports deferred configuration via the onload callback.
# The following usage will create a password column that will automatically hash new passwords as pbkdf2_sha512 but still compare passwords against pre-existing md5_crypt hashes. As passwords are compared; the password hash in the database will be updated to be pbkdf2_sha512.
# class Model(Base):
# password = sa.Column(PasswordType(
# schemes=[
# 'pbkdf2_sha512',
# 'md5_crypt'
# ],
# deprecated=['md5_crypt']
# ))
# Verifying password is as easy as:
# target = Model()
# target.password = 'b'
# # '$5$rounds=80000$H.............'
# target.password == 'b'
# # True
# Lazy configuration of the type with Flask config:
# import flask
# from sqlalchemy_utils import PasswordType, force_auto_coercion
# force_auto_coercion()
# class User(db.Model):
# __tablename__ = 'user'
# password = db.Column(
# PasswordType(
# # The returned dictionary is forwarded to the CryptContext
# onload=lambda **kwargs: dict(
# schemes=flask.current_app.config['PASSWORD_SCHEMES'],
# **kwargs
# ),
# ),
# unique=False,
# nullable=False,
# )
# PhoneNumberType
# classsqlalchemy_utils.types.phone_number.PhoneNumber(raw_number, region=None, check_region=True)[source]
# Extends a PhoneNumber class from Python phonenumbers library. Adds different phone number formats to attributes, so they can be easily used in templates. Phone number validation method is also implemented.
# Takes the raw phone number and country code as params and parses them into a PhoneNumber object.
# from sqlalchemy_utils import PhoneNumber
# class User(self.Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
# name = sa.Column(sa.Unicode(255))
# _phone_number = sa.Column(sa.Unicode(20))
# country_code = sa.Column(sa.Unicode(8))
# phonenumber = sa.orm.composite(
# PhoneNumber,
# _phone_number,
# country_code
# )
# user = User(phone_number=PhoneNumber('0401234567', 'FI'))
# user.phone_number.e164 # u'+358401234567'
# user.phone_number.international # u'+358 40 1234567'
# user.phone_number.national # u'040 1234567'
# user.country_code # 'FI'
# Parameters:
# raw_number – String representation of the phone number.
# region – Region of the phone number.
# check_region – Whether to check the supplied region parameter; should always be True for external callers. Can be useful for short codes or toll free
# classsqlalchemy_utils.types.phone_number.PhoneNumberType(region='US', max_length=20, *args, **kwargs)[source]
# Changes PhoneNumber objects to a string representation on the way in and changes them back to PhoneNumber objects on the way out. If E164 is used as storing format, no country code is needed for parsing the database value to PhoneNumber object.
# class User(self.Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
# name = sa.Column(sa.Unicode(255))
# phone_number = sa.Column(PhoneNumberType())
# user = User(phone_number='+358401234567')
# user.phone_number.e164 # u'+358401234567'
# user.phone_number.international # u'+358 40 1234567'
# user.phone_number.national # u'040 1234567'
# ScalarListType
# classsqlalchemy_utils.types.scalar_list.ScalarListType(coerce_func=<type 'unicode'>, separator=u', ')[source]
# ScalarListType type provides convenient way for saving multiple scalar values in one column. ScalarListType works like list on python side and saves the result as comma-separated list in the database (custom separators can also be used).
# Example
# from sqlalchemy_utils import ScalarListType
# class User(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, autoincrement=True)
# hobbies = sa.Column(ScalarListType())
# user = User()
# user.hobbies = [u'football', u'ice_hockey']
# session.commit()
# You can easily set up integer lists too:
# from sqlalchemy_utils import ScalarListType
# class Player(Base):
# __tablename__ = 'player'
# id = sa.Column(sa.Integer, autoincrement=True)
# points = sa.Column(ScalarListType(int))
# player = Player()
# player.points = [11, 12, 8, 80]
# session.commit()
# TimezoneType
# classsqlalchemy_utils.types.timezone.TimezoneType(backend='dateutil')[source]
# TimezoneType provides a way for saving timezones (from either the pytz or the dateutil package) objects into database. TimezoneType saves timezone objects as strings on the way in and converts them back to objects when querying the database.
# from sqlalchemy_utils import TimezoneType
# class User(Base):
# __tablename__ = 'user'
# # Pass backend='pytz' to change it to use pytz (dateutil by
# # default)
# timezone = sa.Column(TimezoneType(backend='pytz'))
# TSVectorType
# classsqlalchemy_utils.types.ts_vector.TSVectorType(*args, **kwargs)[source]
# Note
# This type is PostgreSQL specific and is not supported by other dialects.
# Provides additional functionality for SQLAlchemy PostgreSQL dialect’s TSVECTOR type. This additional functionality includes:
# Vector concatenation
# regconfig constructor parameter which is applied to match function if no postgresql_regconfig parameter is given
# Provides extensible base for extensions such as SQLAlchemy-Searchable
# from sqlalchemy_utils import TSVectorType
# class Article(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, primary_key=True)
# name = sa.Column(sa.String(100))
# search_vector = sa.Column(TSVectorType)
# # Find all articles whose name matches 'finland'
# session.query(Article).filter(Article.search_vector.match('finland'))
# TSVectorType also supports vector concatenation.
# class Article(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, primary_key=True)
# name = sa.Column(sa.String(100))
# name_vector = sa.Column(TSVectorType)
# content = sa.Column(sa.String)
# content_vector = sa.Column(TSVectorType)
# # Find all articles whose name or content matches 'finland'
# session.query(Article).filter(
# (Article.name_vector | Article.content_vector).match('finland')
# )
# You can configure TSVectorType to use a specific regconfig.
# class Article(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, primary_key=True)
# name = sa.Column(sa.String(100))
# search_vector = sa.Column(
# TSVectorType(regconfig='pg_catalog.simple')
# )
# Now expression such as:
# Article.search_vector.match('finland')
# Would be equivalent to SQL:
# search_vector @@ to_tsquery('pg_catalog.simple', 'finland')
# URLType
# classsqlalchemy_utils.types.url.URLType(*args, **kwargs)[source]
# URLType stores furl objects into database.
# from sqlalchemy_utils import URLType
# from furl import furl
# class User(Base):
# __tablename__ = 'user'
# id = sa.Column(sa.Integer, primary_key=True)
# website = sa.Column(URLType)
# user = User(website=u'www.example.com')
# # website is coerced to furl object, hence all nice furl operations
# # come available
# user.website.args['some_argument'] = '12'
# print user.website
# # www.example.com?some_argument=12
# UUIDType
# classsqlalchemy_utils.types.uuid.UUIDType(binary=True, native=True)[source]
# Stores a UUID in the database natively when it can and falls back to a BINARY(16) or a CHAR(32) when it can’t.
# from sqlalchemy_utils import UUIDType
# import uuid
# class User(Base):
# __tablename__ = 'user'
# # Pass `binary=False` to fallback to CHAR instead of BINARY
# id = sa.Column(VARCHAR(50), primary_key=True)
# WeekDaysType
# classsqlalchemy_utils.types.weekdays.WeekDaysType(*args, **kwargs)[source]
# WeekDaysType offers way of saving WeekDays objects into database. The WeekDays objects | |
self.FocusOnLocalRegion == True:
TotalWidth = self.LocalRegionBound[1] - self.LocalRegionBound[0]
TotalHeight = self.LocalRegionBound[3] - self.LocalRegionBound[2]
else:
TotalWidth = self.MapEastBound - self.MapWestBound
TotalHeight = self.MapNorthBound - self.MapSouthBound
IntervalWidth = TotalWidth / self.NumGrideWidth
IntervalHeight = TotalHeight / self.NumGrideHeight
AllGrid = [Grid(i,[],[],0,[],{},[]) for i in range(NumGride)]
for key,value in NodeSet.items():
NowGridWidthNum = None
NowGridHeightNum = None
for i in range(self.NumGrideWidth):
if self.FocusOnLocalRegion == True:
LeftBound = (self.LocalRegionBound[0] + i * IntervalWidth)
RightBound = (self.LocalRegionBound[0] + (i+1) * IntervalWidth)
else:
LeftBound = (self.MapWestBound + i * IntervalWidth)
RightBound = (self.MapWestBound + (i+1) * IntervalWidth)
if key[0] > LeftBound and key[0] < RightBound:
NowGridWidthNum = i
break
for i in range(self.NumGrideHeight):
if self.FocusOnLocalRegion == True:
DownBound = (self.LocalRegionBound[2] + i * IntervalHeight)
UpBound = (self.LocalRegionBound[2] + (i+1) * IntervalHeight)
else:
DownBound = (self.MapSouthBound + i * IntervalHeight)
UpBound = (self.MapSouthBound + (i+1) * IntervalHeight)
if key[1] > DownBound and key[1] < UpBound:
NowGridHeightNum = i
break
if NowGridWidthNum == None or NowGridHeightNum == None :
print(key[0],key[1])
raise Exception('error')
else:
AllGrid[self.NumGrideWidth * NowGridHeightNum + NowGridWidthNum].Nodes.append((value,(key[0],key[1])))
#------------------------------------------------------
for i in AllGrid:
for j in i.Nodes:
self.NodeID2NodesLocation[j[0]] = j[1]
#Add neighbors to each grid
#------------------------------------------------------
for i in AllGrid:
#Bound Check
#----------------------------
UpNeighbor = True
DownNeighbor = True
LeftNeighbor = True
RightNeighbor = True
LeftUpNeighbor = True
LeftDownNeighbor = True
RightUpNeighbor = True
RightDownNeighbor = True
if i.ID >= self.NumGrideWidth * (self.NumGrideHeight - 1):
UpNeighbor = False
LeftUpNeighbor = False
RightUpNeighbor = False
if i.ID < self.NumGrideWidth:
DownNeighbor = False
LeftDownNeighbor = False
RightDownNeighbor = False
if i.ID % self.NumGrideWidth == 0:
LeftNeighbor = False
LeftUpNeighbor = False
LeftDownNeighbor = False
if (i.ID+1) % self.NumGrideWidth == 0:
RightNeighbor = False
RightUpNeighbor = False
RightDownNeighbor = False
#----------------------------
#Add all neighbors
#----------------------------
if UpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth])
if DownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth])
if LeftNeighbor:
i.Neighbor.append(AllGrid[i.ID-1])
if RightNeighbor:
i.Neighbor.append(AllGrid[i.ID+1])
if LeftUpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth-1])
if LeftDownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth-1])
if RightUpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth+1])
if RightDownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth+1])
#----------------------------
#You can draw every grid(red) and neighbor(random color) here
#----------------------------------------------
'''
for i in range(len(AllGrid)):
print("Grid ID ",i,AllGrid[i])
print(AllGrid[i].Neighbor)
self.DrawOneCluster(Cluster = AllGrid[i],random = False,show = False)
for j in AllGrid[i].Neighbor:
if j.ID == AllGrid[i].ID :
continue
print(j.ID)
self.DrawOneCluster(Cluster = j,random = True,show = False)
plt.xlim(104.007, 104.13)
plt.ylim(30.6119, 30.7092)
plt.show()
'''
#----------------------------------------------
return AllGrid
def CreateCluster(self):
    """Build the cluster partition of the road network.

    Reads node coordinates, optionally restricts them to the local
    region, loads precomputed clustering labels from ./data/, assigns
    each node to its cluster, computes (or loads) average road-cost
    "neighbor" relations between clusters, and records node coordinates
    for drawing.

    Returns
    -------
    list
        One Cluster object per label, with Nodes and Neighbor filled in.

    Raises
    ------
    Exception
        If the precomputed cluster-label file is missing.
    """
    NodeLocation = self.Node[['Longitude','Latitude']].values.round(7)
    NodeID = self.Node['NodeID'].values.astype('int64')

    #Set Nodes In Limit Region
    #----------------------------------------
    if self.FocusOnLocalRegion == True:
        print("Remove out-of-bounds Nodes")
        NodeLocation = NodeLocation.tolist()
        NodeID = NodeID.tolist()
        TempNodeList = []
        for i in range(len(NodeLocation)):
            TempNodeList.append((NodeLocation[i],NodeID[i]))
        # iterate over a copy so removing while iterating is safe
        for i in TempNodeList[:]:
            if self.IsNodeInLimitRegion(i) == False:
                TempNodeList.remove(i)
        NodeLocation.clear()
        NodeID.clear()
        for i in TempNodeList:
            #NodeLocation.append(i[0])
            NodeLocation.append(i[0])
            NodeID.append(i[1])
        NodeLocation = np.array(NodeLocation)
    #----------------------------------------

    # map rounded (longitude, latitude) -> original node id
    N = {}
    for i in range(len(NodeID)):
        N[(NodeLocation[i][0],NodeLocation[i][1])] = NodeID[i]

    Clusters=[Cluster(i,[],[],0,[],{},[]) for i in range(self.ClustersNumber)]

    ClusterPath = './data/'+str(self.LocalRegionBound)+str(self.ClustersNumber)+str(self.ClusterMode)+'Clusters.csv'
    if os.path.exists(ClusterPath):
        # precomputed cluster label per node, read in chunks
        reader = pd.read_csv(ClusterPath,chunksize = 1000)
        label_pred = []
        for chunk in reader:
            label_pred.append(chunk)
        label_pred = pd.concat(label_pred)
        label_pred = label_pred.values
        label_pred = label_pred.flatten()
        label_pred = label_pred.astype('int64')
    else:
        raise Exception('Cluster Path not found')

    #Loading Clustering results into simulator
    print("Loading Clustering results")
    for i in range(self.ClustersNumber):
        temp = NodeLocation[label_pred == i]
        for j in range(len(temp)):
            Clusters[i].Nodes.append((self.NodeIDList.index(N[(temp[j,0],temp[j,1])]),(temp[j,0],temp[j,1])))

    SaveClusterNeighborPath = './data/'+str(self.LocalRegionBound)+str(self.ClustersNumber)+str(self.ClusterMode)+'Neighbor.csv'

    if not os.path.exists(SaveClusterNeighborPath):
        print("Computing Neighbor relationships between clusters")
        AllNeighborList = []
        for i in Clusters:
            NeighborList = []
            for j in Clusters:
                if i == j:
                    continue
                else:
                    # average road cost over all node pairs of the two clusters
                    TempSumCost = 0
                    for k in i.Nodes:
                        for l in j.Nodes:
                            TempSumCost += self.RoadCost(k[0],l[0])
                    # sentinel distance for empty clusters
                    if (len(i.Nodes)*len(j.Nodes)) == 0:
                        RoadNetworkDistance = 99999
                    else:
                        RoadNetworkDistance = TempSumCost / (len(i.Nodes)*len(j.Nodes))
                NeighborList.append((j,RoadNetworkDistance))
            # nearest clusters first
            NeighborList.sort(key=lambda X: X[1])
            AllNeighborList.append([])
            for j in NeighborList:
                AllNeighborList[-1].append((j[0].ID,j[1]))
        AllNeighborList = pd.DataFrame(AllNeighborList)
        AllNeighborList.to_csv(SaveClusterNeighborPath,header=0,index=0)  # do not save the column names
        print("Save the Neighbor relationship records to: "+SaveClusterNeighborPath)

    print("Load Neighbor relationship records")
    reader = pd.read_csv(SaveClusterNeighborPath,header = None,chunksize = 1000)
    NeighborList = []
    for chunk in reader:
        NeighborList.append(chunk)
    NeighborList = pd.concat(NeighborList)
    NeighborList = NeighborList.values

    ID2Cluseter = {}
    for i in Clusters:
        ID2Cluseter[i.ID] = i

    # clusters with average road cost below this are also connected
    ConnectedThreshold = 15
    for i in range(len(Clusters)):
        for j in NeighborList[i]:
            # NOTE(review): eval() parses the "(id, dist)" tuple strings
            # from the CSV; safe only because the file is locally generated.
            temp = eval(j)
            # always keep the 4 nearest neighbors, plus any close enough
            if len(Clusters[i].Neighbor) < 4:
                Clusters[i].Neighbor.append(ID2Cluseter[temp[0]])
            elif temp[1] < ConnectedThreshold:
                Clusters[i].Neighbor.append(ID2Cluseter[temp[0]])
            else:
                continue
    del ID2Cluseter

    #self.NodeID2NodesLocation = {}
    print("Store node coordinates for drawing")
    for i in Clusters:
        for j in i.Nodes:
            self.NodeID2NodesLocation[j[0]] = j[1]

    #You can draw every cluster(red) and neighbor(random color) here
    #----------------------------------------------
    '''
    for i in range(len(Clusters)):
        print("Cluster ID ",i,Clusters[i])
        print(Clusters[i].Neighbor)
        self.DrawOneCluster(Cluster = Clusters[i],random = False,show = False)
        for j in Clusters[i].Neighbor:
            if j.ID == Clusters[i].ID :
                continue
            print(j.ID)
            self.DrawOneCluster(Cluster = j,random = True,show = False)
    plt.xlim(104.007, 104.13)
    plt.ylim(30.6119, 30.7092)
    plt.show()
    '''
    #----------------------------------------------
    return Clusters
def LoadDemandPrediction(self):
    """Instantiate and load the demand predictor selected by
    ``self.DemandPredictionMode``.

    'None' and 'Training' disable prediction; 'HA' loads a
    historical-average model from disk.  Any other mode raises.
    """
    mode = self.DemandPredictionMode
    if mode in ('None', "Training"):
        # prediction disabled (or model currently being trained)
        self.DemandPredictorModule = None
        return
    if mode != 'HA':
        #You can extend the predictor here
        raise Exception('DemandPredictionMode Name error')

    self.DemandPredictorModule = HAPredictionModel()
    model_path = ("./model/" + str(mode) + "PredictionModel" +
                  str(self.ClusterMode) + str(self.SideLengthMeter) +
                  str(self.LocalRegionBound) + ".csv")
    if not os.path.exists(model_path):
        print(model_path)
        raise Exception("No Demand Prediction Model")
    self.DemandPredictorModule.Load(model_path)
    return
def Normaliztion_1D(self, arr):
    """Min-max normalize a 1-D numeric array to the range [0, 1].

    Parameters
    ----------
    arr : array-like
        One-dimensional numeric data.

    Returns
    -------
    numpy.ndarray
        Float array where the minimum maps to 0.0 and the maximum to 1.0.

    Raises
    ------
    ValueError
        If all elements are equal (zero range), which previously caused a
        division by zero.
    """
    # (method name spelling kept for backward compatibility with callers)
    arr = np.asarray(arr)
    arr_min = arr.min()
    span = arr.max() - arr_min
    if span == 0:
        raise ValueError('cannot normalize a constant array (max == min)')
    # vectorized replacement of the former per-element Python loop
    return (arr - arr_min) / float(span)
#Visualization tools
#-----------------------------------------------
def randomcolor(self):
    """Return a random hex color string such as ``"#A3F19C"``.

    Digits are drawn from '1'-'9' and 'A'-'F' (no '0'), matching the
    original palette.
    """
    palette = '123456789ABCDEF'
    return '#' + ''.join(random.choice(palette) for _ in range(6))
def DrawAllClusterInternalNodes(self):
    """Plot the road network: intra-cluster edges in the cluster's color,
    cross-cluster edges in grey, then show the figure.

    An edge is drawn between every pair of known nodes whose road cost in
    the connection map is <= 3000.
    """
    # BUG FIX: the original call ended with a trailing comma, which
    # accidentally wrapped the map in a 1-tuple and needed a second
    # statement (ConnectionMap[0]) to unwrap it.
    ConnectionMap = ReadMap('./data/Map__.csv')

    # one random color per cluster
    ClusetersColor = [self.randomcolor() for _ in range(len(self.Clusters))]

    NodeNumber = len(self.Node)
    for i in tqdm(range(NodeNumber)):
        if i not in self.NodeID2NodesLocation:
            continue
        for j in range(NodeNumber):
            if j not in self.NodeID2NodesLocation:
                continue
            if i == j:
                continue
            if ConnectionMap[i][j] <= 3000:
                LX = [self.NodeID2NodesLocation[i][0], self.NodeID2NodesLocation[j][0]]
                LY = [self.NodeID2NodesLocation[i][1], self.NodeID2NodesLocation[j][1]]
                if self.NodeID2Cluseter[i] == self.NodeID2Cluseter[j]:
                    # both ends in the same cluster -> cluster color
                    plt.plot(LX, LY, c=ClusetersColor[self.NodeID2Cluseter[i].ID], linewidth=0.8, alpha=0.5)
                else:
                    # edge crossing cluster boundary -> grey
                    plt.plot(LX, LY, c='grey', linewidth=0.5, alpha=0.4)

    plt.xlim(self.MapWestBound, self.MapEastBound)
    plt.ylim(self.MapSouthBound, self.MapNorthBound)
    plt.title(self.ClusterMode)
    plt.show()
    return
def DrawAllNodes(self):
    """Plot every edge (road cost <= 3000) between known nodes, colored by
    the cluster of the edge's first endpoint, then show the figure."""
    # BUG FIX: the original call ended with a trailing comma, which
    # accidentally wrapped the map in a 1-tuple and needed a second
    # statement (ConnectionMap[0]) to unwrap it.
    ConnectionMap = ReadMap('./data/Map__.csv')

    # one random color per cluster
    ClusetersColor = [self.randomcolor() for _ in range(len(self.Clusters))]

    NodeNumber = len(self.Node)
    for i in range(NodeNumber):
        if i not in self.NodeID2NodesLocation:
            continue
        for j in range(NodeNumber):
            if j not in self.NodeID2NodesLocation:
                continue
            if i == j:
                continue
            if ConnectionMap[i][j] <= 3000:
                LX = [self.NodeID2NodesLocation[i][0], self.NodeID2NodesLocation[j][0]]
                LY = [self.NodeID2NodesLocation[i][1], self.NodeID2NodesLocation[j][1]]
                plt.plot(LX, LY, c=ClusetersColor[self.NodeID2Cluseter[i].ID], linewidth=0.8, alpha=0.5)

    plt.xlim(self.MapWestBound, self.MapEastBound)
    plt.ylim(self.MapSouthBound, self.MapNorthBound)
    plt.title(self.ClusterMode)
    plt.show()
    return
def DrawOneCluster(self, Cluster, random=True, show=False):
    """Scatter-plot all nodes of one cluster.

    When ``random`` is True the cluster gets one random color, otherwise
    it is drawn in red.  When ``show`` is True the map bounds are applied
    and the figure is displayed.
    """
    # color is always drawn (keeps RNG consumption identical) even if red
    # ends up being used
    randomc = self.randomcolor()
    color = randomc if random == True else 'r'
    for node in Cluster.Nodes:
        lon, lat = node[1]
        plt.scatter(lon, lat, s=3, c=color, alpha=0.5)

    if show == True:
        plt.xlim(self.MapWestBound, self.MapEastBound)
        plt.ylim(self.MapSouthBound, self.MapNorthBound)
        plt.show()
def DrawAllVehicles(self):
    """Scatter every vehicle on the map: idle vehicles blue, en-route
    vehicles with orders red, dispatching vehicles (no orders) green."""
    for cluster in self.Clusters:
        for vehicle in cluster.IdleVehicles:
            x, y = self.NodeID2NodesLocation[vehicle.LocationNode]
            plt.scatter(x, y, s=3, c='b', alpha=0.3)

        for vehicle in cluster.VehiclesArrivetime:
            x, y = self.NodeID2NodesLocation[vehicle.LocationNode]
            if len(vehicle.Orders):
                plt.scatter(x, y, s=3, c='r', alpha=0.3)
            else:
                plt.scatter(x, y, s=3, c='g', alpha=0.3)

    plt.xlim(self.MapWestBound, self.MapEastBound)
    plt.xlabel("red = running blue = idle green = Dispatch")
    plt.ylim(self.MapSouthBound, self.MapNorthBound)
    plt.title("Vehicles Location")
    plt.show()
    return
def DrawVehicleTrajectory(self,Vehicle):
X1,Y1 = self.NodeID2NodesLocation[Vehicle.LocationNode]
X2,Y2 = self.NodeID2NodesLocation[Vehicle.DeliveryPoint]
#start location
plt.scatter(X1,Y1,s = 3, c='black',alpha = 0.3)
#destination
plt.scatter(X2,Y2,s = 3, c='blue',alpha = 0.3)
#Vehicles Trajectory
LX1=[X1,X2]
LY1=[Y1,Y2]
plt.plot(LY1,LX1,c='k',linewidth=0.3,alpha = 0.5)
plt.title("Vehicles Trajectory")
plt.show()
return
#-----------------------------------------------
def WorkdayOrWeekend(self,day):
if type(day) != type(0) or day<0 or day > 6:
raise Exception('input format error')
elif day == 5 or day == 6:
return "Weekend"
else:
return "Workday"
def GetTimeAndWeather(self,Order):
Month = Order.ReleasTime.month
Day = Order.ReleasTime.day
Week = Order.ReleasTime.weekday()
if Week == 5 or Week == 6:
Weekend = 1
else:
Weekend = 0
Hour = Order.ReleasTime.hour
Minute = Order.ReleasTime.minute
if Month == 11:
if Hour < 12:
WeatherType = self.WeatherType[2*(Day-1)]
else:
WeatherType = self.WeatherType[2*(Day-1)+1]
else:
raise Exception('Month format error')
MinimumTemperature = self.MinimumTemperature[Day-1]
MaximumTemperature = self.MaximumTemperature[Day-1]
WindDirection = self.WindDirection[Day-1]
WindPower = self.WindPower[Day-1]
return [Day,Week,Weekend,Hour,Minute,WeatherType,MinimumTemperature,MaximumTemperature,WindDirection,WindPower]
############################################################################
#The main modules
#---------------------------------------------------------------------------
def DemandPredictFunction(self):
"""
Here you can implement your own order forecasting method
to provide efficient and accurate help for Dispatch method
"""
return
def SupplyExpectFunction(self):
"""
Calculate the number of idle Vehicles in the next time slot
of each cluster due to the completion of the order
"""
self.SupplyExpect = np.zeros(self.ClustersNumber)
for i in self.Clusters:
for key,value in list(i.VehiclesArrivetime.items()):
#key = Vehicle ; value = Arrivetime
if value <= self.RealExpTime + self.TimePeriods and len(key.Orders)>0:
self.SupplyExpect[i.ID] += 1
return
def DispatchFunction(self):
"""
Here you can | |
change what types of input the query builder accepts.
# The default set of available relations. Relations with aliases are
    # treated as their aliases. E.g., a search like ['Form', 'source_id', '=', ...]
# will generate the filter model.Form.source_id.__eq__(...)
relations = {
'__eq__': {},
'=': {'alias': '__eq__'},
'__ne__': {},
'!=': {'alias': '__ne__'},
'like': {},
'regexp': {},
'regex': {'alias': 'regexp'},
'__lt__': {},
'<': {'alias': '__lt__'},
'__gt__': {},
'>': {'alias': '__gt__'},
'__le__': {},
'<=': {'alias': '__le__'},
'__ge__': {},
'>=': {'alias': '__ge__'},
'in_': {},
'in': {'alias': 'in_'}
}
equality_relations = {
'__eq__': {},
'=': {'alias': '__eq__'},
'__ne__': {},
'!=': {'alias': '__ne__'}
}
# The schema attribute describes the database structure in a way that allows
# the query builder to properly interpret the list-based queries and
# generate errors where necessary. Maps model names to attribute names.
# Attribute names whose values contain an 'alias' key are treated as the
# value of that key, e.g., ['Form', 'enterer' ...] will be treated as
# Form.enterer_id... The relations listed in self.relations above are the
# default for all attributes. This can be overridden by specifying a
# 'relation' key (cf. schema['Form']['translations'] below). Certain
# attributes require value converters -- functions that change the value in
# some attribute-specific way, e.g., conversion of ISO 8601 datetimes to
# Python datetime objects.
schema = {
'Collection': {
'id': {},
'UUID': {},
'title': {},
'type': {},
'url': {},
'description': {},
'markup_language': {},
'contents': {},
'html': {},
'speaker': {'foreign_model': 'Speaker', 'type': 'scalar'},
'source': {'foreign_model': 'Source', 'type': 'scalar'},
'elicitor': {'foreign_model': 'User', 'type': 'scalar'},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'date_elicited': {'value_converter': '_get_date_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'tags': {'foreign_model': 'Tag', 'type': 'collection'},
'forms': {'foreign_model': 'Form', 'type': 'collection'},
'files': {'foreign_model': 'File', 'type': 'collection'}
},
'CollectionBackup': {
'id': {},
'UUID': {},
'collection_id': {},
'title': {},
'type': {},
'url': {},
'description': {},
'markup_language': {},
'contents': {},
'html': {},
'speaker': {},
'source': {},
'elicitor': {},
'enterer': {},
'date_elicited': {'value_converter': '_get_date_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'tags': {},
'forms': {},
'files': {}
},
'Corpus': {
'id': {},
'UUID': {},
'name': {},
'type': {},
'description': {},
'content': {},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'form_search': {'foreign_model': 'FormSearch', 'type': 'scalar'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'tags': {'foreign_model': 'Tag', 'type': 'collection'},
'forms': {'foreign_model': 'Form', 'type': 'collection'}
},
'CorpusBackup': {
'id': {},
'UUID': {},
'name': {},
'type': {},
'description': {},
'content': {},
'enterer': {},
'modifier': {},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'tags': {},
'forms': {}
},
'ElicitationMethod': {
'id': {},
'name': {},
'description': {},
'datetime_modified': {'value_converter': '_get_datetime_value'},
},
'Form': {
'id': {},
'UUID': {},
'transcription': {},
'phonetic_transcription': {},
'narrow_phonetic_transcription': {},
'morpheme_break': {},
'morpheme_gloss': {},
'comments': {},
'speaker_comments': {},
'grammaticality': {},
'date_elicited': {'value_converter': '_get_date_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'syntactic_category_string': {},
'morpheme_break_ids': {},
'morpheme_gloss_ids': {},
'break_gloss_category': {},
'syntax': {},
'semantics': {},
'status': {},
'elicitor': {'foreign_model': 'User', 'type': 'scalar'},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'verifier': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'speaker': {'foreign_model': 'Speaker', 'type': 'scalar'},
'elicitation_method': {'foreign_model': 'ElicitationMethod', 'type': 'scalar'},
'syntactic_category': {'foreign_model': 'SyntacticCategory', 'type': 'scalar'},
'source': {'foreign_model': 'Source', 'type': 'scalar'},
'translations': {'foreign_model': 'Translation', 'type': 'collection'},
'tags': {'foreign_model': 'Tag', 'type': 'collection'},
'files': {'foreign_model': 'File', 'type': 'collection'},
'collections': {'foreign_model': 'Collection', 'type': 'collection'},
'memorizers': {'foreign_model': 'User', 'type': 'collection'},
'corpora': {'foreign_model': 'Corpus', 'type': 'collection'}
},
'FormBackup': {
'id': {},
'UUID': {},
'form_id': {},
'transcription': {},
'phonetic_transcription': {},
'narrow_phonetic_transcription': {},
'morpheme_break': {},
'morpheme_gloss': {},
'comments': {},
'speaker_comments': {},
'grammaticality': {},
'date_elicited': {'value_converter': '_get_date_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'syntactic_category_string': {},
'morpheme_break_ids': {},
'morpheme_gloss_ids': {},
'break_gloss_category': {},
'syntax': {},
'semantics': {},
'elicitor': {},
'enterer': {},
'verifier': {},
'speaker': {},
'elicitation_method': {},
'syntactic_category': {},
'source': {},
'translations': {},
'tags': {},
'files': {},
'collections': {}
},
'FormSearch': {
'id': {},
'name': {},
'search': {},
'description': {},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'File': {
'id': {},
'filename': {},
'name': {},
'MIME_type': {},
'size': {},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'description': {},
'date_elicited': {'value_converter': '_get_date_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'elicitor': {'foreign_model': 'User', 'type': 'scalar'},
'speaker': {'foreign_model': 'Speaker', 'type': 'scalar'},
'parent_file': {'foreign_model': 'File', 'type': 'scalar'},
'utterance_type': {},
'start': {},
'end': {},
'url': {},
'password': {},
'tags': {'foreign_model': 'Tag', 'type': 'collection'},
'forms': {'foreign_model': 'Form', 'type': 'collection'},
'collections': {'foreign_model': 'Collection', 'type': 'collection'}
},
'Translation': {
'id': {},
'transcription': {},
'grammaticality': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'Language': {
'Id': {},
'Part2B': {},
'Part2T': {},
'Part1': {},
'Scope': {},
'Type': {},
'Ref_Name': {},
'Comment': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'Memorizer': {
'id': {},
'first_name': {},
'last_name': {},
'role': {}
},
'MorphemeLanguageModel': {
'id': {},
'UUID': {},
'name': {},
'description': {},
'smoothing': {},
'order': {},
'corpus': {'foreign_model': 'Corpus', 'type': 'scalar'},
'vocabulary_morphology': {'foreign_model': 'Morphology', 'type': 'scalar'},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'estimation_succeeded': {},
'estimation_message': {},
'estimation_attempt': {}
},
'MorphemeLanguageModelBackup': {
'id': {},
'UUID': {},
'name': {},
'description': {},
'corpus': {},
'enterer': {},
'modifier': {},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'estimation_succeeded': {},
'estimation_message': {},
'estimation_attempt': {}
},
'MorphologicalParser': {
'id': {},
'UUID': {},
'name': {},
'description': {},
'phonology': {'foreign_model': 'Phonology', 'type': 'scalar'},
'morphology': {'foreign_model': 'Morphology', 'type': 'scalar'},
'language_model': {'foreign_model': 'MorphemeLanguageModel', 'type': 'scalar'},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
'compile_attempt': {},
},
'MorphologicalParserBackup': {
'id': {},
'morphologicalparser_id': {},
'UUID': {},
'name': {},
'description': {},
'phonology': {},
'morphology': {},
'language_model': {},
'enterer': {},
'modifier': {},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
'compile_attempt': {},
},
'Morphology': {
'id': {},
'UUID': {},
'name': {},
'description': {},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
'compile_attempt': {},
'generate_attempt': {},
'extract_morphemes_from_rules_corpus': {},
'rules': {},
'rules_generated': {},
'script_type': {},
'lexicon_corpus': {'foreign_model': 'Corpus', 'type': 'scalar'},
'rules_corpus': {'foreign_model': 'Corpus', 'type': 'scalar'}
},
'MorphologyBackup': {
'id': {},
'morphology_id': {},
'UUID': {},
'name': {},
'description': {},
'enterer': {},
'modifier': {},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
'compile_attempt': {},
'generate_attempt': {},
'extract_morphemes_from_rules_corpus': {},
'script_type': {},
'lexicon_corpus': {},
'rules_corpus': {},
'rules': {}
},
'Orthography': {
'id': {},
'name': {},
'orthography': {},
'lowercase': {},
'initial_glottal_stops': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'Phonology': {
'id': {},
'UUID': {},
'name': {},
'description': {},
'script': {},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'datetime_compiled': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
},
'PhonologyBackup': {
'id': {},
'phonology_id': {},
'UUID': {},
'name': {},
'description': {},
'script': {},
'enterer': {},
'modifier': {},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'datetime_compiled': {'value_converter': '_get_datetime_value'},
'compile_succeeded': {},
'compile_message': {},
},
'Source': {
'id': {},
'file_id': {},
'file': {'foreign_model': 'File', 'type': 'scalar'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'type': {},
'key': {},
'address': {},
'annote': {},
'author': {},
'booktitle': {},
'chapter': {},
'crossref': {},
'edition': {},
'editor': {},
'howpublished': {},
'institution': {},
'journal': {},
'key_field': {},
'month': {},
'note': {},
'number': {},
'organization': {},
'pages': {},
'publisher': {},
'school': {},
'series': {},
'title': {},
'type_field': {},
'url': {},
'volume': {},
'year': {},
'affiliation': {},
'abstract': {},
'contents': {},
'copyright': {},
'ISBN': {},
'ISSN': {},
'keywords': {},
'language': {},
'location': {},
'LCCN': {},
'mrnumber': {},
'price': {},
'size': {}
},
'Speaker': {
'id': {},
'first_name': {},
'last_name': {},
'dialect': {},
'page_content': {},
'markup_language': {},
'html': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'SyntacticCategory': {
'id': {},
'name': {},
'type': {},
'description': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'User': {
'id': {},
'first_name': {},
'last_name': {},
'email': {},
'affiliation': {},
'role': {},
'markup_language': {},
'page_content': {},
'html': {},
'input_orthography': {'foreign_model': 'Orthography', 'type': 'scalar'},
'output_orthography': {'foreign_model': 'Orthography', 'type': 'scalar'},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'remembered_forms': {'foreign_model': 'Form', 'type': 'collection'}
},
'Tag': {
'id': {},
'name': {},
'description': {},
'datetime_modified': {'value_converter': '_get_datetime_value'}
},
'Keyboard': {
'id': {},
'name': {},
'description': {},
'datetime_modified': {'value_converter': '_get_datetime_value'},
'datetime_entered': {'value_converter': '_get_datetime_value'},
'enterer': {'foreign_model': 'User', 'type': 'scalar'},
'modifier': {'foreign_model': 'User', 'type': 'scalar'}
}
}
model_aliases = {
| |
== 2:
ratioRaw = (
(fvPos[1] - ymin) /
float(ymax - ymin))
elif axis == 3:
ratioRaw = (
(fvPos[2] - zmin) /
float(zmax - zmin))
ratio = max(min(ratioRaw, 1), 0)
outColor = maya.cmds.colorAtPoint(
'SXRamp', o='RGB', u=(ratio), v=(ratio))
outAlpha = maya.cmds.colorAtPoint(
'SXAlphaRamp', o='A', u=(ratio), v=(ratio))
if outAlpha[0] > 0:
fvColors[idx].r = outColor[0]
fvColors[idx].g = outColor[1]
fvColors[idx].b = outColor[2]
else:
fvColors[idx].r = outAlpha[0]
fvColors[idx].g = outAlpha[0]
fvColors[idx].b = outAlpha[0]
fvColors[idx].a = outAlpha[0]
break
fvIt.next()
else:
fvIt = OM.MItMeshFaceVertex(selDagPath)
k = 0
while not fvIt.isDone():
ratioRaw = None
ratio = None
# faceIds[k] = fvIt.faceId()
# vtxIds[k] = fvIt.vertexId()
fvPos = fvIt.position(space)
if axis == 1:
ratioRaw = (
(fvPos[0] - xmin) /
float(xmax - xmin))
elif axis == 2:
ratioRaw = (
(fvPos[1] - ymin) /
float(ymax - ymin))
elif axis == 3:
ratioRaw = (
(fvPos[2] - zmin) /
float(zmax - zmin))
ratio = max(min(ratioRaw, 1), 0)
outColor = maya.cmds.colorAtPoint(
'SXRamp', o='RGB', u=(ratio), v=(ratio))
outAlpha = maya.cmds.colorAtPoint(
'SXAlphaRamp', o='A', u=(ratio), v=(ratio))
if outAlpha[0] > 0:
fvColors[k].r = outColor[0]
fvColors[k].g = outColor[1]
fvColors[k].b = outColor[2]
else:
fvColors[k].r = outAlpha[0]
fvColors[k].g = outAlpha[0]
fvColors[k].b = outAlpha[0]
fvColors[k].a = outAlpha[0]
k += 1
fvIt.next()
# sxglobals.layers.setColorSet(sxglobals.settings.tools['selectedLayer'])
mesh.setFaceVertexColors(fvColors, faceIds, vtxIds, mod, colorRep)
selectionIter.next()
mod.doIt()
totalTime = maya.cmds.timerX(startTime=startTimeOcc)
print('SX Tools: Gradient Fill duration ' + str(totalTime))
    def colorFill(self, overwriteAlpha=False, palette=False):
        """Fill the selected layer's vertex colors with the current color.

        Operates on the active component selection (converted to face
        vertices) when one exists, otherwise on every selected shape.

        overwriteAlpha -- when True, replace RGBA wholesale with the fill
            color (alpha forced to 1.0).
        palette -- when True, replace only RGB; face verts that are fully
            transparent on any layer other than 'layer1' are set to black
            instead of the fill color.
        When neither flag applies, RGBA is replaced if the layer's max
        alpha is 0, otherwise only RGB is replaced.
        """
        #startTimeOcc = maya.cmds.timerX()
        layer = sxglobals.settings.tools['selectedLayer']
        sxglobals.layers.setColorSet(layer)
        fillColor = OM.MColor()
        mod = OM.MDGModifier()
        colorRep = OM.MFnMesh.kRGBA
        fillColor.r = sxglobals.settings.currentColor[0]
        fillColor.g = sxglobals.settings.currentColor[1]
        fillColor.b = sxglobals.settings.currentColor[2]
        fillColor.a = 1.0
        if len(sxglobals.settings.componentArray) > 0:
            # Convert component selection to face vertices,
            # fill position-matching verts with color
            selection = maya.cmds.ls(
                maya.cmds.polyListComponentConversion(
                    sxglobals.settings.selectionArray, tvf=True), fl=True)
        else:
            selection = sxglobals.settings.shapeArray
        selectionList = OM.MSelectionList()
        for sl in selection:
            selectionList.add(sl)
        selDagPath = OM.MDagPath()
        fVert = OM.MObject()
        fvColors = OM.MColorArray()
        vtxIds = OM.MIntArray()
        fvIds = OM.MIntArray()
        faceIds = OM.MIntArray()
        compDagPath = OM.MDagPath()
        selectionIter = OM.MItSelectionList(selectionList)
        while not selectionIter.isDone():
            # Gather full mesh data to compare selection against
            selDagPath = selectionIter.getDagPath()
            mesh = OM.MFnMesh(selDagPath)
            fvColors.clear()
            fvColors = mesh.getFaceVertexColors(colorSet=layer)
            selLen = len(fvColors)
            vtxIds.setLength(selLen)
            fvIds.setLength(selLen)
            faceIds.setLength(selLen)
            # Record (vertex, face, face-vert) ids per face vertex so selected
            # components can be matched by index below.
            meshIter = OM.MItMeshFaceVertex(selDagPath)
            i = 0
            while not meshIter.isDone():
                vtxIds[i] = meshIter.vertexId()
                faceIds[i] = meshIter.faceId()
                fvIds[i] = meshIter.faceVertexId()
                i += 1
                meshIter.next()
            if selectionIter.hasComponents():
                (compDagPath, fVert) = selectionIter.getComponent()
                # Iterate through selected vertices on current selection
                fvIt = OM.MItMeshFaceVertex(selDagPath, fVert)
                while not fvIt.isDone():
                    faceId = fvIt.faceId()
                    fvId = fvIt.faceVertexId()
                    vtxId = fvIt.vertexId()
                    # linear scan; selLen is the full face-vertex count
                    for idx in xrange(selLen):
                        if (faceId == faceIds[idx] and
                            fvId == fvIds[idx] and
                            vtxId == vtxIds[idx] and
                            compDagPath == selDagPath):
                            fvColors[idx] = fillColor
                            break
                    fvIt.next()
            else:
                if palette:
                    for idx in xrange(selLen):
                        # keep transparent verts black on non-base layers
                        if fvColors[idx].a == 0 and layer != 'layer1':
                            fvColors[idx].r = 0.0
                            fvColors[idx].g = 0.0
                            fvColors[idx].b = 0.0
                        else:
                            fvColors[idx].r = fillColor.r
                            fvColors[idx].g = fillColor.g
                            fvColors[idx].b = fillColor.b
                elif overwriteAlpha:
                    for idx in xrange(selLen):
                        fvColors[idx] = fillColor
                elif (not overwriteAlpha) and (sxglobals.settings.layerAlphaMax == 0):
                    for idx in xrange(selLen):
                        fvColors[idx] = fillColor
                elif ((not overwriteAlpha) and (sxglobals.settings.layerAlphaMax != 0)):
                    for idx in xrange(selLen):
                        fvColors[idx].r = fillColor.r
                        fvColors[idx].g = fillColor.g
                        fvColors[idx].b = fillColor.b
                else:
                    fvColors = [fillColor] * selLen
                mesh.setFaceVertexColors(fvColors, faceIds, vtxIds, mod, colorRep)
            mod.doIt()
            selectionIter.next()
        if sxglobals.settings.tools['noiseValue'] > 0:
            self.colorNoise()
        #totalTime = maya.cmds.timerX(startTime=startTimeOcc)
        #print('SX Tools: Apply Color duration ' + str(totalTime))
        if not palette:
            sxglobals.layers.refreshLayerList()
            sxglobals.layers.compositeLayers()
    def colorNoise(self):
        """Apply random per-vertex color noise to the selected layer.

        In monochrome mode each vertex's RGB is scaled by one random factor
        in [1 - value, 1]; otherwise each channel gets an independent random
        offset proportional to the current color and the noise value.
        Operates on the component selection (converted to vertices) when one
        exists, otherwise on every selected shape.
        """
        mono = sxglobals.settings.tools['noiseMonochrome']
        color = sxglobals.settings.currentColor
        value = sxglobals.settings.tools['noiseValue']
        layer = sxglobals.settings.tools['selectedLayer']
        if len(sxglobals.settings.componentArray) > 0:
            # Convert component selection to vertices,
            # fill position-matching verts with color
            selection = maya.cmds.polyListComponentConversion(
                sxglobals.settings.selectionArray, tv=True, internal=True)
        else:
            selection = sxglobals.settings.shapeArray
        selectionList = OM.MSelectionList()
        for sl in selection:
            selectionList.add(sl)
        # selectionList = OM.MGlobal.getActiveSelectionList()
        selDagPath = OM.MDagPath()
        vert = OM.MObject()
        vtxColors = OM.MColorArray()
        vtxPosArray = OM.MPointArray()
        vtxIds = OM.MIntArray()
        compDagPath = OM.MDagPath()
        selectionIter = OM.MItSelectionList(selectionList)
        while not selectionIter.isDone():
            # Gather full mesh data to compare selection against
            selDagPath = selectionIter.getDagPath()
            mesh = OM.MFnMesh(selDagPath)
            vtxColors.clear()
            vtxColors = mesh.getVertexColors(colorSet=layer)
            selLen = len(vtxColors)
            vtxIds.setLength(selLen)
            vtxPosArray.setLength(selLen)
            changedCols = OM.MColorArray()
            changedIds = OM.MIntArray()
            # Record every vertex position so selected components can be
            # matched by position below.
            meshIter = OM.MItMeshVertex(selDagPath)
            while not meshIter.isDone():
                i = meshIter.index()
                vtxIds[i] = meshIter.index()
                vtxPosArray[i] = meshIter.position()
                meshIter.next()
            if selectionIter.hasComponents():
                (compDagPath, vert) = selectionIter.getComponent()
                # Iterate through selected vertices on current selection
                vtxIt = OM.MItMeshVertex(selDagPath, vert)
                while not vtxIt.isDone():
                    vtxPos = vtxIt.position()
                    for idx in xrange(selLen):
                        if (vtxPos == vtxPosArray[idx] and
                            compDagPath == selDagPath):
                            if mono:
                                # one scale factor for all three channels
                                randomOffset = 1 - random.uniform(0, value)
                                vtxColors[idx].r *= randomOffset
                                vtxColors[idx].g *= randomOffset
                                vtxColors[idx].b *= randomOffset
                            else:
                                # independent offset per channel
                                vtxColors[idx].r += random.uniform(-color[0]*value, color[0]*value)
                                vtxColors[idx].g += random.uniform(-color[1]*value, color[1]*value)
                                vtxColors[idx].b += random.uniform(-color[2]*value, color[2]*value)
                            changedCols.append(vtxColors[idx])
                            changedIds.append(idx)
                            break
                    vtxIt.next()
                # only the touched vertices are written back
                mesh.setVertexColors(changedCols, changedIds)
                selectionIter.next()
            else:
                # whole-shape path: re-fetch colors and noise every vertex
                vtxColors = OM.MColorArray()
                vtxColors = mesh.getVertexColors(colorSet=layer)
                vtxIds = OM.MIntArray()
                lenSel = len(vtxColors)
                vtxIds.setLength(lenSel)
                vtxIt = OM.MItMeshVertex(selDagPath)
                while not vtxIt.isDone():
                    idx = vtxIt.index()
                    vtxIds[idx] = vtxIt.index()
                    if mono:
                        randomOffset = 1 - random.uniform(0, value)
                        vtxColors[idx].r *= randomOffset
                        vtxColors[idx].g *= randomOffset
                        vtxColors[idx].b *= randomOffset
                    else:
                        vtxColors[idx].r += random.uniform(-color[0]*value, color[0]*value)
                        vtxColors[idx].g += random.uniform(-color[1]*value, color[1]*value)
                        vtxColors[idx].b += random.uniform(-color[2]*value, color[2]*value)
                    vtxIt.next()
                mesh.setVertexColors(vtxColors, vtxIds)
                selectionIter.next()
    def remapRamp(self):
        """Remap each face vertex's color through the SXRamp / SXAlphaRamp.

        A quick luminance estimate, (2R + 3G + B) / 6, is used as both the
        U and V ramp coordinates; the sampled ramp RGB and alpha replace the
        face vertex's color. Operates on the component selection (converted
        to face vertices) when one exists, otherwise on every selected
        shape.
        """
        startTimeOcc = maya.cmds.timerX()
        layer = sxglobals.settings.tools['selectedLayer']
        sxglobals.layers.setColorSet(sxglobals.settings.tools['selectedLayer'])
        fvCol = OM.MColor()
        if len(sxglobals.settings.componentArray) > 0:
            # Convert component selection to face vertices,
            # fill position-matching verts with color
            selection = maya.cmds.ls(
                maya.cmds.polyListComponentConversion(
                    sxglobals.settings.selectionArray, tvf=True), fl=True)
        else:
            selection = sxglobals.settings.shapeArray
        selectionList = OM.MSelectionList()
        for sl in selection:
            selectionList.add(sl)
        selDagPath = OM.MDagPath()
        fVert = OM.MObject()
        fvColors = OM.MColorArray()
        vtxIds = OM.MIntArray()
        fvIds = OM.MIntArray()
        faceIds = OM.MIntArray()
        compDagPath = OM.MDagPath()
        selectionIter = OM.MItSelectionList(selectionList)
        while not selectionIter.isDone():
            # Gather full mesh data to compare selection against
            selDagPath = selectionIter.getDagPath()
            mesh = OM.MFnMesh(selDagPath)
            fvColors.clear()
            fvColors = mesh.getFaceVertexColors(colorSet=layer)
            selLen = len(fvColors)
            vtxIds.setLength(selLen)
            fvIds.setLength(selLen)
            faceIds.setLength(selLen)
            meshIter = OM.MItMeshFaceVertex(selDagPath)
            i = 0
            while not meshIter.isDone():
                vtxIds[i] = meshIter.vertexId()
                faceIds[i] = meshIter.faceId()
                fvIds[i] = meshIter.faceVertexId()
                i += 1
                meshIter.next()
            if selectionIter.hasComponents():
                (compDagPath, fVert) = selectionIter.getComponent()
                # Iterate through selected facevertices on current selection
                fvIt = OM.MItMeshFaceVertex(selDagPath, fVert)
                while not fvIt.isDone():
                    faceId = fvIt.faceId()
                    fvId = fvIt.faceVertexId()
                    vtxId = fvIt.vertexId()
                    for idx in xrange(selLen):
                        if (faceId == faceIds[idx] and
                            fvId == fvIds[idx] and
                            vtxId == vtxIds[idx] and
                            compDagPath == selDagPath):
                            fvCol = fvColors[idx]
                            # quick luminance: (2R + 3G + B) / 6
                            luminance = ((fvCol.r +
                                          fvCol.r +
                                          fvCol.b +
                                          fvCol.g +
                                          fvCol.g +
                                          fvCol.g) / float(6.0))
                            outColor = maya.cmds.colorAtPoint(
                                'SXRamp', o='RGB', u=luminance, v=luminance)
                            outAlpha = maya.cmds.colorAtPoint(
                                'SXAlphaRamp', o='A', u=luminance, v=luminance)
                            fvColors[idx].r = outColor[0]
                            fvColors[idx].g = outColor[1]
                            fvColors[idx].b = outColor[2]
                            fvColors[idx].a = outAlpha[0]
                            break
                    fvIt.next()
            else:
                fvIt = OM.MItMeshFaceVertex(selDagPath)
                k = 0
                while not fvIt.isDone():
                    fvCol = fvColors[k]
                    # quick luminance: (2R + 3G + B) / 6
                    luminance = ((fvCol.r +
                                  fvCol.r +
                                  fvCol.b +
                                  fvCol.g +
                                  fvCol.g +
                                  fvCol.g) / float(6.0))
                    outColor = maya.cmds.colorAtPoint(
                        'SXRamp', o='RGB', u=luminance, v=luminance)
                    outAlpha = maya.cmds.colorAtPoint(
                        'SXAlphaRamp', o='A', u=luminance, v=luminance)
                    fvColors[k].r = outColor[0]
                    fvColors[k].g = outColor[1]
                    fvColors[k].b = outColor[2]
                    fvColors[k].a = outAlpha[0]
                    k += 1
                    fvIt.next()
            mesh.setFaceVertexColors(fvColors, faceIds, vtxIds)
            selectionIter.next()
        totalTime = maya.cmds.timerX(startTime=startTimeOcc)
        print(
            'SX Tools: Surface luminance remap duration ' + str(totalTime))
    def copyLayer(self, shapes, mode=1):
        """Copy (mode 1) or swap (mode 2) vertex-color layers on shapes.

        Copies the source layer's face-vertex colors and blend-mode
        attribute onto the target layer for every shape; in mode 2 the
        target's original colors and blend mode are written back to the
        source, performing a full swap. Both configured layers must be in
        the project's reference layer list, otherwise nothing happens.
        """
        refLayers = sxglobals.layers.sortLayers(
            sxglobals.settings.project['LayerData'].keys())
        layerA = sxglobals.settings.tools['sourceLayer']
        layerB = sxglobals.settings.tools['targetLayer']
        if (layerA in refLayers) and (layerB in refLayers):
            for shape in shapes:
                attrA = '.' + layerA + 'BlendMode'
                modeA = maya.cmds.getAttr(str(shape) + attrA)
                attrB = '.' + layerB + 'BlendMode'
                selectionList = OM.MSelectionList()
                selectionList.add(shape)
                nodeDagPath = OM.MDagPath()
                nodeDagPath = selectionList.getDagPath(0)
                MFnMesh = OM.MFnMesh(nodeDagPath)
                layerAColors = OM.MColorArray()
                layerAColors = MFnMesh.getFaceVertexColors(colorSet=layerA)
                if mode == 2:
                    # stash target colors/blend mode before they are overwritten
                    modeB = maya.cmds.getAttr(str(shape) + attrB)
                    layerBColors = OM.MColorArray()
                    layerBColors = MFnMesh.getFaceVertexColors(colorSet=layerB)
                    temp = OM.MColorArray()
                    temp = layerBColors
                    faceIds = OM.MIntArray()
                    vtxIds = OM.MIntArray()
                    lenSel = len(layerAColors)
                    faceIds.setLength(lenSel)
                    vtxIds.setLength(lenSel)
                    fvIt = OM.MItMeshFaceVertex(nodeDagPath)
                    k = 0
                    while not fvIt.isDone():
                        faceIds[k] = fvIt.faceId()
                        vtxIds[k] = fvIt.vertexId()
                        k += 1
                        fvIt.next()
                    # the current color set determines which set the write hits
                    maya.cmds.polyColorSet(shape, currentColorSet=True, colorSet=layerB)
                    MFnMesh.setFaceVertexColors(layerAColors, faceIds, vtxIds)
                    maya.cmds.setAttr(str(shape) + attrB, modeA)
                    if mode == 2:
                        maya.cmds.polyColorSet(shape, currentColorSet=True, colorSet=layerA)
                        MFnMesh.setFaceVertexColors(temp, faceIds, vtxIds)
                        maya.cmds.setAttr(str(shape) + attrA, modeB)
            sxglobals.layers.refreshLayerList()
            sxglobals.layers.compositeLayers()
        else:
            print('SXTools Error: Invalid layers!')
# Updates tool title bar and returns active shading mode
def verifyShadingMode(self):
if len(sxglobals.settings.shapeArray) > 0:
obj = sxglobals.settings.shapeArray[len(sxglobals.settings.shapeArray)-1]
mode = int(maya.cmds.getAttr(obj + '.shadingMode') + 1)
objectLabel = (
'Selected Objects: ' +
str(len(sxglobals.settings.objectArray)) +
' | ' +
'Layer Set: ' +
str(int(maya.cmds.getAttr(
str(sxglobals.settings.shapeArray[0]) +
'.activeLayerSet'))+1) + '/' +
str(sxglobals.layers.getLayerSets(
sxglobals.settings.shapeArray[0])+1))
maya.cmds.frameLayout('layerFrame', edit=True, label=objectLabel)
maya.cmds.radioButtonGrp('shadingButtons', edit=True, select=mode)
return mode
def setShadingMode(self, mode):
for shape in sxglobals.settings.shapeArray:
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2015-2018 <NAME> <<EMAIL>> and collaborators.
# Licensed under the MIT License.
"""sas - running software in the SAS environment
To use, export an environment variable $PWKIT_SAS pointing to the SAS
installation root. The files $PWKIT_SAS/RELEASE and $PWKIT_SAS/setsas.sh
should exist. The "current calibration files" (CCF) should be accessible as
$PWKIT_SAS/ccf/; a symlink may make sense if multiple SAS versions are going
to be used.
SAS is unusual because you need to set up some magic environment variables
specific to the dataset that you're working with. There is also default
preparation to be run on each dataset before anything useful can be done.
Unpacking data sets
==========================
Data sets are downloaded as tar.gz files. Those unpack to a few files in '.'
including a .TAR file, which should be unpacked too. That unpacks to a bunch
of data files in '.' as well.
SAS installation notes
==========================
Download tarball from, e.g.,
ftp://legacy.gsfc.nasa.gov/xmm/software/sas/14.0.0/64/Linux/Fedora20/
Tarball unpacks installation script and data into '.', and the installation
script sets up a SAS install in a versioned subdirectory of '.', so curl|tar
should be run from something like /a/sas::
$ ./install.sh
The CCF are like CALDB and need to be rsynced -- see the update-ccf
subcommand.
ODF data format notes
=========================
ODF files all have names in the format RRRR_NNNNNNNNNN_IIUEEECCMMM.ZZZ where:
RRRR
revolution (orbit) number
NNNNNNNNNN
obs ID
II
The instrument:
OM
optical monitor
R1
RGS (reflection grating spectrometer) unit 1
R2
RGS 2
M1
EPIC (imaging camera) MOS 1 detector
M2
EPIC (imaging camera) MOS 2 detector
PN
EPIC (imaging camera) PN detector
RM
EPIC radiation monitor
SC
spacecraft
U
Scheduling status of exposure:
S
scheduled
U
unscheduled
X
N/A
EEE
exposure number
CC
CCD/OM-window ID
MMM
data type of file (many; not listed here)
ZZZ
file extension
See the ``make-*-aliases`` commands for tools that generate symlinks with saner
names.
"""
from __future__ import absolute_import, division, print_function
__all__ = ''.split()
import io, os.path, six
from ... import PKError, cli
from ...cli import multitool
from ...io import Path
from .. import Environment, prepend_environ_path, user_data_path
class SasEnvironment(Environment):
    """Environment for running XMM-Newton SAS tools against one ODF.

    The observation is identified by the path to its manifest file; the
    revolution number is read from the manifest and the observation ID is
    inferred by globbing the surrounding ODF directory. On first use,
    :meth:`_preexec` builds the CCF index (``ccf.cif``) and the ``*SUM.SAS``
    summary file if they do not yet exist.
    """

    _odfdir = None  # Path: directory holding the unpacked ODF files
    _revnum = None  # str: revolution (orbit) number; kept as text
    _obsid = None  # str: observation ID
    _sumsas = None  # Path: the RRRR_OBSID_SCX00000SUM.SAS summary file
    _installdir = None  # str: absolute SAS installation directory
    _heaenv = None  # the HEASoft environment that SAS layers on top of

    def __init__(self, manifest, installdir=None, heaenv=None):
        if installdir is None:
            installdir = self._default_installdir()
        if heaenv is None:
            from .. import heasoft
            heaenv = heasoft.HeasoftEnvironment()

        self._installdir = os.path.abspath(installdir)
        self._heaenv = heaenv

        # TODO: I used to read the manifest file to infer both the revolution
        # number and obsid, but in the case of 0673000145, the obsid mentioned
        # in the manifest is different! (But close: 0673000101.) So now I glob
        # the containing directory for that.
        manifest = Path(manifest)

        for line in manifest.read_lines():
            if not line.startswith('File '):
                continue
            bits = line.split()[1].split('_')
            if len(bits) < 3:
                continue
            self._revnum = bits[0]  # note: kept as a string; not an int
            break

        self._odfdir = Path(manifest).resolve().parent

        # obsid comes from the name of any revolution-prefixed .FIT file
        for p in self._odfdir.glob('%s_*_*.FIT' % self._revnum):
            bits = p.name.split('_')
            self._obsid = bits[1]
            break

        self._sumsas = self._odfdir / ('%s_%s_SCX00000SUM.SAS' % (self._revnum, self._obsid))

    def _default_installdir(self):
        """Return $PWKIT_SAS, raising PKError if it is unset."""
        d = os.environ.get('PWKIT_SAS')
        if d is None:
            raise PKError('SAS installation directory must be specified '
                          'in the $PWKIT_SAS environment variable')
        return d

    def modify_environment(self, env):
        """Populate *env* with the variables needed to run SAS tools."""
        self._heaenv.modify_environment(env)

        def path(*args):
            return os.path.join(self._installdir, *args)

        env['SAS_DIR'] = path()
        env['SAS_PATH'] = env['SAS_DIR']
        env['SAS_CCFPATH'] = path('ccf')
        env['SAS_ODF'] = str(self._sumsas)  # but see _preexec
        env['SAS_CCF'] = str(self._odfdir / 'ccf.cif')

        prepend_environ_path(env, 'PATH', path('bin'))
        prepend_environ_path(env, 'LD_LIBRARY_PATH', path('libextra'))
        prepend_environ_path(env, 'LD_LIBRARY_PATH', path('lib'))
        prepend_environ_path(env, 'PERL5LIB', path('lib', 'perl5'))

        env['SAS_BROWSER'] = 'firefox'  # yay hardcoding
        env['SAS_IMAGEVIEWER'] = 'ds9'
        env['SAS_SUPPRESS_WARNING'] = '1'
        env['SAS_VERBOSITY'] = '4'

        # These can be helpful:
        env['PWKIT_SAS_REVNUM'] = self._revnum
        env['PWKIT_SAS_OBSID'] = self._obsid

        return env

    def _preexec(self, env, printbuilds=True):
        """Ensure the CCF index and SUM.SAS summary exist, building them
        with cifbuild / odfingest if necessary."""
        from ...cli import wrapout

        # Need to compile the CCF info?
        cif = env['SAS_CCF']
        if not os.path.exists(cif):
            if printbuilds:
                print('[building %s]' % cif)

            # both builders want SAS_ODF pointed at the directory, not the
            # summary file; restored afterwards
            env['SAS_ODF'] = str(self._odfdir)
            log = self._odfdir / 'cifbuild.log'

            with log.open('wb') as f:
                w = wrapout.Wrapper(f)
                w.use_colors = True
                if w.launch('cifbuild', ['cifbuild'], env=env, cwd=str(self._odfdir)):
                    raise PKError('failed to build CIF; see %s', log)

            if not os.path.exists(cif):
                # cifbuild can exit with status 0 whilst still having failed
                raise PKError('failed to build CIF; see %s', log)

            env['SAS_ODF'] = str(self._sumsas)

        # Need to generate SUM.SAS file?
        if not self._sumsas.exists():
            if printbuilds:
                print('[building %s]' % self._sumsas)

            env['SAS_ODF'] = str(self._odfdir)
            log = self._odfdir / 'odfingest.log'

            with log.open('wb') as f:
                w = wrapout.Wrapper(f)
                w.use_colors = True
                if w.launch('odfingest', ['odfingest'], env=env, cwd=str(self._odfdir)):
                    # BUG FIX: this message previously said "failed to build
                    # CIF", copy-pasted from the cifbuild branch above.
                    raise PKError('failed to generate SUM.SAS; see %s', log)

            if not self._sumsas.exists():
                # like cifbuild, odfingest may exit 0 despite having failed
                raise PKError('failed to generate SUM.SAS; see %s', log)

            env['SAS_ODF'] = str(self._sumsas)
# Command-line interface
class Exec(multitool.Command):
    """CLI command: run an arbitrary program inside a SAS environment."""

    name = 'exec'
    argspec = '<manifest> <command> [args...]'
    summary = 'Run a program in SAS.'
    more_help = '''Due to the way SAS works, the path to a MANIFEST.nnnnn file in an ODF
directory must be specified, and all operations work on the specified data
set.'''

    def invoke(self, args, **kwargs):
        # Need at least the manifest path plus the program to run.
        if len(args) < 2:
            raise multitool.UsageError('exec requires at least 2 arguments')

        environment = SasEnvironment(args[0])
        environment.execvpe(args[1:])
class MakeEPICAliases(multitool.Command):
    """CLI command: symlink EPIC ODF data files under human-readable names."""

    name = 'make-epic-aliases'
    argspec = '<srcdir> <destdir>'
    summary = 'Generate user-friendly aliases to XMM-Newton EPIC data files.'
    more_help = '''destdir should already not exist and will be created. <srcdir> should
be the ODF directory, containing a file named MANIFEST.<numbers> and many others.'''

    # Character ranges of the fields within an ODF file name; the field codes
    # (instrument, scheduling flag, EEE exposure number, CC CCD id, MMM data
    # type, ZZZ extension) are described at the top of this module.
    INSTRUMENT = slice(16, 18)
    EXPFLAG = slice(18, 19)  # 'S': sched, 'U': unsched; 'X': N/A
    EXPNO = slice(19, 22)
    CCDNO = slice(22, 24)
    DTYPE = slice(24, 27)
    EXTENSION = slice(28, None)

    # Two-letter instrument code -> short alias used in generated names.
    instrmap = {
        'M1': 'mos1',
        'M2': 'mos2',
        'PN': 'pn',
        'RM': 'radmon',
    }

    # Raw file extension -> alias extension.
    extmap = {
        'FIT': 'fits',
    }

    # Lower-cased MMM data-type code -> descriptive alias component.
    dtypemap = {
        'aux': 'aux',
        'bue': 'burst',
        'ccx': 'counting_cycle',
        'cte': 'compressed_timing',
        'dii': 'diagnostic',
        'dli': 'discarded_lines',
        'ecx': 'hk_extraheating_config',  # or radiation mon count rate
        'esx': 'spectra',  # radiation monitor spectra, that is
        'hbh': 'hk_hbr_buffer',
        'hch': 'hk_hbr_config',
        'hdi': 'high_rate_offset_data',
        'hth': 'hk_hbr_threshold',
        'ime': 'imaging',
        'noi': 'noise',
        'odi': 'offset_data',
        'ove': 'offset_variance',
        'pah': 'hk_additional',
        'peh': 'hk_periodic',
        'pmh': 'hk_main',
        'pth': 'hk_bright_pixels',
        'rie': 'reduced_imaging',
        'tmh': 'hk_thermal_limits',
        'tie': 'timing',
    }

    def invoke(self, args, **kwargs):
        """Create alias symlinks in <destdir>, which must not already exist."""
        if len(args) != 2:
            raise multitool.UsageError('make-epic-aliases requires exactly 2 arguments')

        srcdir = Path(args[0])
        destdir = Path(args[1])
        # Only names long enough to contain all fields above are candidates.
        srcpaths = [x for x in srcdir.iterdir() if len(x.name) > 28]

        # Sorted list of exposure numbers.
        expnos = dict((i, set()) for i in six.iterkeys(self.instrmap))

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            expno = int(p.name[self.EXPNO])
            dtype = p.name[self.DTYPE]
            # Exposure 0 and the per-CCD DLI/ODI products are excluded from
            # the dense renumbering computed below.
            if expno > 0 and dtype not in ('DLI', 'ODI'):
                expnos[instr].add(expno)

        # Per instrument: raw exposure number -> dense 0-based index.
        expseqs = {}
        for k, v in six.iteritems(expnos):
            expseqs[self.instrmap[k]] = dict((n, i) for i, n in enumerate(sorted(v)))

        # Do it.
        stems = set()
        destdir.mkdir()  # intentionally crash if exists; easiest approach

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            # eflag is extracted but not currently used in the alias name.
            eflag = p.name[self.EXPFLAG]
            expno = p.name[self.EXPNO]
            ccdno = p.name[self.CCDNO]
            dtype = p.name[self.DTYPE]
            ext = p.name[self.EXTENSION]

            instr = self.instrmap[instr]
            expno = int(expno)
            dtype = self.dtypemap[dtype.lower()]
            ext = self.extmap[ext]

            if expno > 0 and dtype not in ('discarded_lines', 'offset_data'):
                expno = expseqs[instr][expno]

            if instr == 'radmon' and dtype == 'hk_extraheating_config':
                # For the radiation monitor, 'ecx' is the count-rate product
                # (see dtypemap comment above).
                dtype = 'rates'

            # Pick a short, unambiguous stem for this product.
            if instr == 'radmon' or dtype == 'aux':
                stem = '%s_e%03d_%s.%s' % (instr, expno, dtype, ext)
            elif ccdno == '00':
                stem = '%s_%s.%s' % (instr, dtype, ext)
            elif dtype in ('discarded_lines', 'offset_data'):
                stem = '%s_%s_e%03d_c%s.%s' % (instr, dtype, expno, ccdno, ext)
            else:
                stem = '%s_e%03d_c%s_%s.%s' % (instr, expno, ccdno, dtype, ext)

            if stem in stems:
                cli.die('short identifier clash: %r', stem)
            stems.add(stem)
            (destdir / stem).rellink_to(p)
class MakeOMAliases(multitool.Command):
name = 'make-om-aliases'
argspec = '<srcdir> <destdir>'
summary = 'Generate user-friendly aliases to XMM-Newton OM data files.'
more_help = 'destdir should already not exist and will be created.'
PROD_TYPE = slice(0, 1) # 'P': final product; 'F': intermediate
OBSID = slice(1, 11)
EXPFLAG = slice(11, 12) # 'S': sched, 'U': unsched; 'X': N/A
EXPNO = slice(14, 17) # (12-14 is the string 'OM')
DTYPE = slice(17, 23)
WINNUM = slice(23, 24)
SRCNUM = slice(24, 27)
EXTENSION = slice(28, None)
extmap = {
'ASC': 'txt',
'FIT': 'fits',
'PDF': 'pdf',
'PS': 'ps',
}
dtypemap = {
'image_': 'image_ccd',
'simage': 'image_sky',
'swsrli': 'source_list',
'timesr': 'lightcurve',
'tshplt': 'tracking_plot',
'tstrts': 'tracking_stars',
}
def invoke(self, args, **kwargs):
if len(args) != 2:
raise multitool.UsageError('make-om-aliases requires exactly 2 arguments')
from fnmatch import fnmatch
srcdir, destdir = args
srcfiles = [x for x in os.listdir(srcdir)
if x[0] == 'P' and len(x) > | |
<reponame>npodewitz/airflow<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google DataFusion operators."""
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from google.api_core.retry import exponential_sleep_generator
from googleapiclient.errors import HttpError
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.datafusion import SUCCESS_STATES, DataFusionHook, PipelineStates
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
# Base Cloud-console URL for the Data Fusion product pages.
BASE_LINK = "https://console.cloud.google.com/data-fusion"
# Deep link to a single Data Fusion instance in the Cloud console.
DATAFUSION_INSTANCE_LINK = BASE_LINK + "/locations/{region}/instances/{instance_name}?project={project_id}"
# Links into the instance's own CDAP UI: pipeline list and single-pipeline view.
DATAFUSION_PIPELINES_LINK = "{uri}/cdap/ns/default/pipelines"
DATAFUSION_PIPELINE_LINK = "{uri}/pipelines/ns/default/view/{pipeline_name}"
class DataFusionPipelineLinkHelper:
    """Helper class for Pipeline links"""

    @staticmethod
    def get_project_id(instance):
        # The instance resource name is a '/'-separated path; return the first
        # segment that starts with "airflow". Raises IndexError when no
        # segment matches, exactly like the original list-index lookup.
        segments = instance["name"].split("/")
        matching = [segment for segment in segments if segment.startswith("airflow")]
        return matching[0]
class DataFusionInstanceLink(BaseGoogleLink):
    """Helper class for constructing Data Fusion Instance link"""

    name = "Data Fusion Instance"
    key = "instance_conf"
    format_str = DATAFUSION_INSTANCE_LINK

    @staticmethod
    def persist(
        context: "Context",
        task_instance: Union[
            "CloudDataFusionRestartInstanceOperator",
            "CloudDataFusionCreateInstanceOperator",
            "CloudDataFusionUpdateInstanceOperator",
            "CloudDataFusionGetInstanceOperator",
        ],
        project_id: str,
    ):
        # Stash the parameters needed to render format_str into XCom.
        link_params = {
            "region": task_instance.location,
            "instance_name": task_instance.instance_name,
            "project_id": project_id,
        }
        task_instance.xcom_push(context=context, key=DataFusionInstanceLink.key, value=link_params)
class DataFusionPipelineLink(BaseGoogleLink):
    """Helper class for constructing Data Fusion Pipeline link"""

    name = "Data Fusion Pipeline"
    key = "pipeline_conf"
    format_str = DATAFUSION_PIPELINE_LINK

    @staticmethod
    def persist(
        context: "Context",
        task_instance: Union[
            "CloudDataFusionCreatePipelineOperator",
            "CloudDataFusionStartPipelineOperator",
            "CloudDataFusionStopPipelineOperator",
        ],
        uri: str,
    ):
        # Stash the parameters needed to render format_str into XCom.
        link_params = {
            "uri": uri,
            "pipeline_name": task_instance.pipeline_name,
        }
        task_instance.xcom_push(context=context, key=DataFusionPipelineLink.key, value=link_params)
class DataFusionPipelinesLink(BaseGoogleLink):
    """Helper class for constructing list of Data Fusion Pipelines link"""

    name = "Data Fusion Pipelines"
    key = "pipelines_conf"
    format_str = DATAFUSION_PIPELINES_LINK

    @staticmethod
    def persist(
        context: "Context",
        task_instance: "CloudDataFusionListPipelinesOperator",
        uri: str,
    ):
        # Only the CDAP UI base URI is needed to render the pipeline list link.
        link_params = {"uri": uri}
        task_instance.xcom_push(context=context, key=DataFusionPipelinesLink.key, value=link_params)
class CloudDataFusionRestartInstanceOperator(BaseOperator):
    """
    Restart a single Data Fusion instance.
    At the end of an operation instance is fully restarted.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudDataFusionRestartInstanceOperator`

    :param instance_name: The name of the instance to restart.
    :param location: The Cloud Data Fusion location in which to handle the request.
    :param project_id: The ID of the Google Cloud project that the instance belongs to.
    :param api_version: The version of the api that will be requested for example 'v3'.
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = ("instance_name", "impersonation_chain")
    operator_extra_links = (DataFusionInstanceLink(),)

    def __init__(
        self,
        *,
        instance_name: str,
        location: str,
        project_id: Optional[str] = None,
        api_version: str = "v1beta1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Target instance.
        self.instance_name = instance_name
        self.location = location
        self.project_id = project_id
        # Connection / auth configuration.
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context') -> None:
        hook = DataFusionHook(
            api_version=self.api_version,
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Restarting Data Fusion instance: %s", self.instance_name)
        # Kick off the restart and block until the long-running operation finishes.
        instance = hook.wait_for_operation(
            hook.restart_instance(
                instance_name=self.instance_name,
                location=self.location,
                project_id=self.project_id,
            )
        )
        self.log.info("Instance %s restarted successfully", self.instance_name)
        # Fall back to parsing the project id out of the instance resource
        # name when the operator was not given one explicitly.
        resolved_project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
        DataFusionInstanceLink.persist(context=context, task_instance=self, project_id=resolved_project_id)
class CloudDataFusionDeleteInstanceOperator(BaseOperator):
    """
    Deletes a single Data Fusion instance.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudDataFusionDeleteInstanceOperator`

    :param instance_name: The name of the instance to delete.
    :param location: The Cloud Data Fusion location in which to handle the request.
    :param project_id: The ID of the Google Cloud project that the instance belongs to.
    :param api_version: The version of the api that will be requested for example 'v3'.
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = ("instance_name", "impersonation_chain")

    def __init__(
        self,
        *,
        instance_name: str,
        location: str,
        project_id: Optional[str] = None,
        api_version: str = "v1beta1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Target instance.
        self.instance_name = instance_name
        self.location = location
        self.project_id = project_id
        # Connection / auth configuration.
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context') -> None:
        hook = DataFusionHook(
            api_version=self.api_version,
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info("Deleting Data Fusion instance: %s", self.instance_name)
        # Start the deletion and block until the long-running operation finishes.
        hook.wait_for_operation(
            hook.delete_instance(
                instance_name=self.instance_name,
                location=self.location,
                project_id=self.project_id,
            )
        )
        self.log.info("Instance %s deleted successfully", self.instance_name)
class CloudDataFusionCreateInstanceOperator(BaseOperator):
"""
Creates a new Data Fusion instance in the specified project and location.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreateInstanceOperator`
:param instance_name: The name of the instance to create.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"instance",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
instance: Dict[str, Any],
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.instance = instance
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion instance: %s", self.instance_name)
try:
operation = hook.create_instance(
instance_name=self.instance_name,
instance=self.instance,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s created successfully", self.instance_name)
except HttpError as err:
if err.resp.status not in (409, '409'):
raise
| |
http_archive,
name = "raze__humantime__1_3_0",
url = "https://crates.io/api/v1/crates/humantime/1.3.0/download",
type = "tar.gz",
strip_prefix = "humantime-1.3.0",
build_file = Label("//rules/rust/remote:BUILD.humantime-1.3.0.bazel"),
)
maybe(
http_archive,
name = "raze__idna__0_2_0",
url = "https://crates.io/api/v1/crates/idna/0.2.0/download",
type = "tar.gz",
strip_prefix = "idna-0.2.0",
build_file = Label("//rules/rust/remote:BUILD.idna-0.2.0.bazel"),
)
maybe(
http_archive,
name = "raze__indexmap__1_6_1",
url = "https://crates.io/api/v1/crates/indexmap/1.6.1/download",
type = "tar.gz",
strip_prefix = "indexmap-1.6.1",
build_file = Label("//rules/rust/remote:BUILD.indexmap-1.6.1.bazel"),
)
maybe(
http_archive,
name = "raze__instant__0_1_9",
url = "https://crates.io/api/v1/crates/instant/0.1.9/download",
type = "tar.gz",
strip_prefix = "instant-0.1.9",
build_file = Label("//rules/rust/remote:BUILD.instant-0.1.9.bazel"),
)
maybe(
http_archive,
name = "raze__integer_encoding__2_1_1",
url = "https://crates.io/api/v1/crates/integer-encoding/2.1.1/download",
type = "tar.gz",
strip_prefix = "integer-encoding-2.1.1",
build_file = Label("//rules/rust/remote:BUILD.integer-encoding-2.1.1.bazel"),
)
maybe(
http_archive,
name = "raze__iovec__0_1_4",
url = "https://crates.io/api/v1/crates/iovec/0.1.4/download",
type = "tar.gz",
strip_prefix = "iovec-0.1.4",
build_file = Label("//rules/rust/remote:BUILD.iovec-0.1.4.bazel"),
)
maybe(
http_archive,
name = "raze__ipconfig__0_2_2",
url = "https://crates.io/api/v1/crates/ipconfig/0.2.2/download",
type = "tar.gz",
strip_prefix = "ipconfig-0.2.2",
build_file = Label("//rules/rust/remote:BUILD.ipconfig-0.2.2.bazel"),
)
maybe(
http_archive,
name = "raze__itertools__0_9_0",
url = "https://crates.io/api/v1/crates/itertools/0.9.0/download",
type = "tar.gz",
strip_prefix = "itertools-0.9.0",
build_file = Label("//rules/rust/remote:BUILD.itertools-0.9.0.bazel"),
)
maybe(
http_archive,
name = "raze__itoa__0_4_7",
url = "https://crates.io/api/v1/crates/itoa/0.4.7/download",
type = "tar.gz",
strip_prefix = "itoa-0.4.7",
build_file = Label("//rules/rust/remote:BUILD.itoa-0.4.7.bazel"),
)
maybe(
http_archive,
name = "raze__kernel32_sys__0_2_2",
url = "https://crates.io/api/v1/crates/kernel32-sys/0.2.2/download",
type = "tar.gz",
strip_prefix = "kernel32-sys-0.2.2",
build_file = Label("//rules/rust/remote:BUILD.kernel32-sys-0.2.2.bazel"),
)
maybe(
http_archive,
name = "raze__language_tags__0_2_2",
url = "https://crates.io/api/v1/crates/language-tags/0.2.2/download",
type = "tar.gz",
strip_prefix = "language-tags-0.2.2",
build_file = Label("//rules/rust/remote:BUILD.language-tags-0.2.2.bazel"),
)
maybe(
http_archive,
name = "raze__lazy_static__1_4_0",
url = "https://crates.io/api/v1/crates/lazy_static/1.4.0/download",
type = "tar.gz",
strip_prefix = "lazy_static-1.4.0",
build_file = Label("//rules/rust/remote:BUILD.lazy_static-1.4.0.bazel"),
)
maybe(
http_archive,
name = "raze__levenshtein_automata__0_1_1",
url = "https://crates.io/api/v1/crates/levenshtein_automata/0.1.1/download",
type = "tar.gz",
strip_prefix = "levenshtein_automata-0.1.1",
build_file = Label("//rules/rust/remote:BUILD.levenshtein_automata-0.1.1.bazel"),
)
maybe(
http_archive,
name = "raze__lexical_core__0_7_4",
url = "https://crates.io/api/v1/crates/lexical-core/0.7.4/download",
type = "tar.gz",
strip_prefix = "lexical-core-0.7.4",
build_file = Label("//rules/rust/remote:BUILD.lexical-core-0.7.4.bazel"),
)
maybe(
http_archive,
name = "raze__libc__0_2_81",
url = "https://crates.io/api/v1/crates/libc/0.2.81/download",
type = "tar.gz",
strip_prefix = "libc-0.2.81",
build_file = Label("//rules/rust/remote:BUILD.libc-0.2.81.bazel"),
)
maybe(
http_archive,
name = "raze__linked_hash_map__0_3_0",
url = "https://crates.io/api/v1/crates/linked-hash-map/0.3.0/download",
type = "tar.gz",
strip_prefix = "linked-hash-map-0.3.0",
build_file = Label("//rules/rust/remote:BUILD.linked-hash-map-0.3.0.bazel"),
)
maybe(
http_archive,
name = "raze__linked_hash_map__0_5_3",
url = "https://crates.io/api/v1/crates/linked-hash-map/0.5.3/download",
type = "tar.gz",
strip_prefix = "linked-hash-map-0.5.3",
build_file = Label("//rules/rust/remote:BUILD.linked-hash-map-0.5.3.bazel"),
)
maybe(
http_archive,
name = "raze__lock_api__0_4_2",
url = "https://crates.io/api/v1/crates/lock_api/0.4.2/download",
type = "tar.gz",
strip_prefix = "lock_api-0.4.2",
build_file = Label("//rules/rust/remote:BUILD.lock_api-0.4.2.bazel"),
)
maybe(
http_archive,
name = "raze__log__0_4_11",
url = "https://crates.io/api/v1/crates/log/0.4.11/download",
type = "tar.gz",
strip_prefix = "log-0.4.11",
build_file = Label("//rules/rust/remote:BUILD.log-0.4.11.bazel"),
)
maybe(
http_archive,
name = "raze__log_mdc__0_1_0",
url = "https://crates.io/api/v1/crates/log-mdc/0.1.0/download",
type = "tar.gz",
strip_prefix = "log-mdc-0.1.0",
build_file = Label("//rules/rust/remote:BUILD.log-mdc-0.1.0.bazel"),
)
maybe(
http_archive,
name = "raze__log_ndc__0_2_0",
url = "https://crates.io/api/v1/crates/log-ndc/0.2.0/download",
type = "tar.gz",
strip_prefix = "log-ndc-0.2.0",
build_file = Label("//rules/rust/remote:BUILD.log-ndc-0.2.0.bazel"),
)
maybe(
http_archive,
name = "raze__log4rs__0_10_0",
url = "https://crates.io/api/v1/crates/log4rs/0.10.0/download",
type = "tar.gz",
strip_prefix = "log4rs-0.10.0",
build_file = Label("//rules/rust/remote:BUILD.log4rs-0.10.0.bazel"),
)
maybe(
http_archive,
name = "raze__lru__0_6_3",
url = "https://crates.io/api/v1/crates/lru/0.6.3/download",
type = "tar.gz",
strip_prefix = "lru-0.6.3",
build_file = Label("//rules/rust/remote:BUILD.lru-0.6.3.bazel"),
)
maybe(
http_archive,
name = "raze__lru_cache__0_1_2",
url = "https://crates.io/api/v1/crates/lru-cache/0.1.2/download",
type = "tar.gz",
strip_prefix = "lru-cache-0.1.2",
build_file = Label("//rules/rust/remote:BUILD.lru-cache-0.1.2.bazel"),
)
maybe(
http_archive,
name = "raze__lz4__1_23_2",
url = "https://crates.io/api/v1/crates/lz4/1.23.2/download",
type = "tar.gz",
strip_prefix = "lz4-1.23.2",
build_file = Label("//rules/rust/remote:BUILD.lz4-1.23.2.bazel"),
)
maybe(
http_archive,
name = "raze__lz4_sys__1_9_2",
url = "https://crates.io/api/v1/crates/lz4-sys/1.9.2/download",
type = "tar.gz",
strip_prefix = "lz4-sys-1.9.2",
build_file = Label("//rules/rust/remote:BUILD.lz4-sys-1.9.2.bazel"),
)
maybe(
http_archive,
name = "raze__maplit__1_0_2",
url = "https://crates.io/api/v1/crates/maplit/1.0.2/download",
type = "tar.gz",
strip_prefix = "maplit-1.0.2",
build_file = Label("//rules/rust/remote:BUILD.maplit-1.0.2.bazel"),
)
maybe(
http_archive,
name = "raze__match_cfg__0_1_0",
url = "https://crates.io/api/v1/crates/match_cfg/0.1.0/download",
type = "tar.gz",
strip_prefix = "match_cfg-0.1.0",
build_file = Label("//rules/rust/remote:BUILD.match_cfg-0.1.0.bazel"),
)
maybe(
http_archive,
name = "raze__matches__0_1_8",
url = "https://crates.io/api/v1/crates/matches/0.1.8/download",
type = "tar.gz",
strip_prefix = "matches-0.1.8",
build_file = Label("//rules/rust/remote:BUILD.matches-0.1.8.bazel"),
)
maybe(
http_archive,
name = "raze__maybe_uninit__2_0_0",
url = "https://crates.io/api/v1/crates/maybe-uninit/2.0.0/download",
type = "tar.gz",
strip_prefix = "maybe-uninit-2.0.0",
build_file = Label("//rules/rust/remote:BUILD.maybe-uninit-2.0.0.bazel"),
)
maybe(
http_archive,
name = "raze__md5__0_7_0",
url = "https://crates.io/api/v1/crates/md5/0.7.0/download",
type = "tar.gz",
strip_prefix = "md5-0.7.0",
build_file = Label("//rules/rust/remote:BUILD.md5-0.7.0.bazel"),
)
maybe(
http_archive,
name = "raze__memchr__2_3_4",
url = "https://crates.io/api/v1/crates/memchr/2.3.4/download",
type = "tar.gz",
strip_prefix = "memchr-2.3.4",
build_file = Label("//rules/rust/remote:BUILD.memchr-2.3.4.bazel"),
)
maybe(
http_archive,
name = "raze__memmap__0_7_0",
url = "https://crates.io/api/v1/crates/memmap/0.7.0/download",
type = "tar.gz",
strip_prefix = "memmap-0.7.0",
build_file = Label("//rules/rust/remote:BUILD.memmap-0.7.0.bazel"),
)
maybe(
http_archive,
name = "raze__memoffset__0_5_6",
url = "https://crates.io/api/v1/crates/memoffset/0.5.6/download",
type = "tar.gz",
strip_prefix = "memoffset-0.5.6",
build_file = Label("//rules/rust/remote:BUILD.memoffset-0.5.6.bazel"),
)
maybe(
http_archive,
name = "raze__memoffset__0_6_1",
url = "https://crates.io/api/v1/crates/memoffset/0.6.1/download",
type = "tar.gz",
strip_prefix = "memoffset-0.6.1",
build_file = Label("//rules/rust/remote:BUILD.memoffset-0.6.1.bazel"),
)
maybe(
http_archive,
name = "raze__mime__0_3_16",
url = "https://crates.io/api/v1/crates/mime/0.3.16/download",
type = "tar.gz",
strip_prefix = "mime-0.3.16",
build_file = Label("//rules/rust/remote:BUILD.mime-0.3.16.bazel"),
)
maybe(
http_archive,
name = "raze__miniz_oxide__0_4_3",
url = "https://crates.io/api/v1/crates/miniz_oxide/0.4.3/download",
type = "tar.gz",
strip_prefix = "miniz_oxide-0.4.3",
build_file = Label("//rules/rust/remote:BUILD.miniz_oxide-0.4.3.bazel"),
)
maybe(
http_archive,
name = "raze__mio__0_6_23",
url = "https://crates.io/api/v1/crates/mio/0.6.23/download",
type = "tar.gz",
strip_prefix = "mio-0.6.23",
build_file = Label("//rules/rust/remote:BUILD.mio-0.6.23.bazel"),
)
maybe(
http_archive,
name = "raze__mio__0_7_7",
url = "https://crates.io/api/v1/crates/mio/0.7.7/download",
type = "tar.gz",
strip_prefix = "mio-0.7.7",
build_file = Label("//rules/rust/remote:BUILD.mio-0.7.7.bazel"),
)
maybe(
http_archive,
name = "raze__mio_named_pipes__0_1_7",
url = "https://crates.io/api/v1/crates/mio-named-pipes/0.1.7/download",
type = "tar.gz",
strip_prefix = "mio-named-pipes-0.1.7",
build_file = Label("//rules/rust/remote:BUILD.mio-named-pipes-0.1.7.bazel"),
)
maybe(
http_archive,
name = "raze__mio_uds__0_6_8",
url = "https://crates.io/api/v1/crates/mio-uds/0.6.8/download",
type = "tar.gz",
strip_prefix = "mio-uds-0.6.8",
build_file = Label("//rules/rust/remote:BUILD.mio-uds-0.6.8.bazel"),
)
maybe(
http_archive,
name = "raze__miow__0_2_2",
url = "https://crates.io/api/v1/crates/miow/0.2.2/download",
type = "tar.gz",
strip_prefix = "miow-0.2.2",
build_file = Label("//rules/rust/remote:BUILD.miow-0.2.2.bazel"),
)
maybe(
http_archive,
name = "raze__miow__0_3_6",
url = "https://crates.io/api/v1/crates/miow/0.3.6/download",
type = "tar.gz",
strip_prefix = "miow-0.3.6",
build_file = Label("//rules/rust/remote:BUILD.miow-0.3.6.bazel"),
)
maybe(
http_archive,
name = "raze__murmurhash32__0_2_0",
url = "https://crates.io/api/v1/crates/murmurhash32/0.2.0/download",
type = "tar.gz",
strip_prefix = "murmurhash32-0.2.0",
build_file = Label("//rules/rust/remote:BUILD.murmurhash32-0.2.0.bazel"),
)
maybe(
http_archive,
name = "raze__net2__0_2_37",
url = "https://crates.io/api/v1/crates/net2/0.2.37/download",
type = "tar.gz",
strip_prefix = "net2-0.2.37",
build_file = Label("//rules/rust/remote:BUILD.net2-0.2.37.bazel"),
)
maybe(
http_archive,
name = "raze__nix__0_14_1",
url = "https://crates.io/api/v1/crates/nix/0.14.1/download",
type = "tar.gz",
strip_prefix = "nix-0.14.1",
build_file = Label("//rules/rust/remote:BUILD.nix-0.14.1.bazel"),
)
maybe(
http_archive,
name = "raze__nom__5_1_2",
url = "https://crates.io/api/v1/crates/nom/5.1.2/download",
type = "tar.gz",
strip_prefix = "nom-5.1.2",
build_file = Label("//rules/rust/remote:BUILD.nom-5.1.2.bazel"),
)
maybe(
http_archive,
name = "raze__ntapi__0_3_6",
url = "https://crates.io/api/v1/crates/ntapi/0.3.6/download",
type = "tar.gz",
strip_prefix = "ntapi-0.3.6",
build_file = Label("//rules/rust/remote:BUILD.ntapi-0.3.6.bazel"),
)
maybe(
http_archive,
name = "raze__num_bigint__0_3_1",
url = "https://crates.io/api/v1/crates/num-bigint/0.3.1/download",
type = "tar.gz",
strip_prefix = "num-bigint-0.3.1",
build_file = Label("//rules/rust/remote:BUILD.num-bigint-0.3.1.bazel"),
)
maybe(
http_archive,
name = "raze__num_integer__0_1_44",
url = "https://crates.io/api/v1/crates/num-integer/0.1.44/download",
type = "tar.gz",
strip_prefix = "num-integer-0.1.44",
build_file = Label("//rules/rust/remote:BUILD.num-integer-0.1.44.bazel"),
)
# Vendored crates.io source archives. The "raze__" naming suggests this
# block is cargo-raze generated output — TODO confirm before hand-editing.
# `maybe(...)` defines each repository only if it is not already defined,
# so user overrides of individual crates take precedence.
maybe(
    http_archive,
    name = "raze__num_traits__0_1_43",
    url = "https://crates.io/api/v1/crates/num-traits/0.1.43/download",
    type = "tar.gz",
    strip_prefix = "num-traits-0.1.43",
    build_file = Label("//rules/rust/remote:BUILD.num-traits-0.1.43.bazel"),
)

maybe(
    http_archive,
    name = "raze__num_traits__0_2_14",
    url = "https://crates.io/api/v1/crates/num-traits/0.2.14/download",
    type = "tar.gz",
    strip_prefix = "num-traits-0.2.14",
    build_file = Label("//rules/rust/remote:BUILD.num-traits-0.2.14.bazel"),
)

maybe(
    http_archive,
    name = "raze__num_cpus__1_13_0",
    url = "https://crates.io/api/v1/crates/num_cpus/1.13.0/download",
    type = "tar.gz",
    strip_prefix = "num_cpus-1.13.0",
    build_file = Label("//rules/rust/remote:BUILD.num_cpus-1.13.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__object__0_22_0",
    url = "https://crates.io/api/v1/crates/object/0.22.0/download",
    type = "tar.gz",
    strip_prefix = "object-0.22.0",
    build_file = Label("//rules/rust/remote:BUILD.object-0.22.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__once_cell__1_5_2",
    url = "https://crates.io/api/v1/crates/once_cell/1.5.2/download",
    type = "tar.gz",
    strip_prefix = "once_cell-1.5.2",
    build_file = Label("//rules/rust/remote:BUILD.once_cell-1.5.2.bazel"),
)

maybe(
    http_archive,
    name = "raze__opaque_debug__0_3_0",
    url = "https://crates.io/api/v1/crates/opaque-debug/0.3.0/download",
    type = "tar.gz",
    strip_prefix = "opaque-debug-0.3.0",
    build_file = Label("//rules/rust/remote:BUILD.opaque-debug-0.3.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__ordered_float__1_1_1",
    url = "https://crates.io/api/v1/crates/ordered-float/1.1.1/download",
    type = "tar.gz",
    strip_prefix = "ordered-float-1.1.1",
    build_file = Label("//rules/rust/remote:BUILD.ordered-float-1.1.1.bazel"),
)

maybe(
    http_archive,
    name = "raze__owned_read__0_4_1",
    url = "https://crates.io/api/v1/crates/owned-read/0.4.1/download",
    type = "tar.gz",
    strip_prefix = "owned-read-0.4.1",
    build_file = Label("//rules/rust/remote:BUILD.owned-read-0.4.1.bazel"),
)

maybe(
    http_archive,
    name = "raze__owning_ref__0_4_1",
    url = "https://crates.io/api/v1/crates/owning_ref/0.4.1/download",
    type = "tar.gz",
    strip_prefix = "owning_ref-0.4.1",
    build_file = Label("//rules/rust/remote:BUILD.owning_ref-0.4.1.bazel"),
)

maybe(
    http_archive,
    name = "raze__parking_lot__0_11_1",
    url = "https://crates.io/api/v1/crates/parking_lot/0.11.1/download",
    type = "tar.gz",
    strip_prefix = "parking_lot-0.11.1",
    build_file = Label("//rules/rust/remote:BUILD.parking_lot-0.11.1.bazel"),
)

maybe(
    http_archive,
    name = "raze__parking_lot_core__0_8_2",
    url = "https://crates.io/api/v1/crates/parking_lot_core/0.8.2/download",
    type = "tar.gz",
    strip_prefix = "parking_lot_core-0.8.2",
    build_file = Label("//rules/rust/remote:BUILD.parking_lot_core-0.8.2.bazel"),
)

maybe(
    http_archive,
    name = "raze__percent_encoding__2_1_0",
    url = "https://crates.io/api/v1/crates/percent-encoding/2.1.0/download",
    type = "tar.gz",
    strip_prefix = "percent-encoding-2.1.0",
    build_file = Label("//rules/rust/remote:BUILD.percent-encoding-2.1.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__phf__0_8_0",
    url = "https://crates.io/api/v1/crates/phf/0.8.0/download",
    type = "tar.gz",
    strip_prefix = "phf-0.8.0",
    build_file = Label("//rules/rust/remote:BUILD.phf-0.8.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__phf_shared__0_8_0",
    url = "https://crates.io/api/v1/crates/phf_shared/0.8.0/download",
    type = "tar.gz",
    strip_prefix = "phf_shared-0.8.0",
    build_file = Label("//rules/rust/remote:BUILD.phf_shared-0.8.0.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project__0_4_27",
    url = "https://crates.io/api/v1/crates/pin-project/0.4.27/download",
    type = "tar.gz",
    strip_prefix = "pin-project-0.4.27",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-0.4.27.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project__1_0_3",
    url = "https://crates.io/api/v1/crates/pin-project/1.0.3/download",
    type = "tar.gz",
    strip_prefix = "pin-project-1.0.3",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-1.0.3.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project_internal__0_4_27",
    url = "https://crates.io/api/v1/crates/pin-project-internal/0.4.27/download",
    type = "tar.gz",
    strip_prefix = "pin-project-internal-0.4.27",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-internal-0.4.27.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project_internal__1_0_3",
    url = "https://crates.io/api/v1/crates/pin-project-internal/1.0.3/download",
    type = "tar.gz",
    strip_prefix = "pin-project-internal-1.0.3",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-internal-1.0.3.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project_lite__0_1_11",
    url = "https://crates.io/api/v1/crates/pin-project-lite/0.1.11/download",
    type = "tar.gz",
    strip_prefix = "pin-project-lite-0.1.11",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-lite-0.1.11.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_project_lite__0_2_1",
    url = "https://crates.io/api/v1/crates/pin-project-lite/0.2.1/download",
    type = "tar.gz",
    strip_prefix = "pin-project-lite-0.2.1",
    build_file = Label("//rules/rust/remote:BUILD.pin-project-lite-0.2.1.bazel"),
)

maybe(
    http_archive,
    name = "raze__pin_utils__0_1_0",
    url = "https://crates.io/api/v1/crates/pin-utils/0.1.0/download",
    type = "tar.gz",
    strip_prefix = "pin-utils-0.1.0",
    build_file = Label("//rules/rust/remote:BUILD.pin-utils-0.1.0.bazel"),
)
maybe(
http_archive,
name | |
# change is applied.
self.vertices: List[MLineVertex] = []
def __len__(self):
    """Return the number of stored MLINE vertices."""
    vertex_count = len(self.vertices)
    return vertex_count
def _copy_data(self, entity: 'MLine') -> None:
    """Copy the vertex list into the clone `entity`. (internal API)"""
    copied = []
    for vertex in self.vertices:
        copied.append(vertex.copy())
    entity.vertices = copied
def load_dxf_attribs(
        self, processor: SubclassProcessor = None) -> 'DXFNamespace':
    """Load the DXF namespace and the MLINE vertices via `processor`."""
    dxf = super().load_dxf_attribs(processor)
    if not processor:
        return dxf
    # Vertex tags remain in `tags` after loading the plain attributes:
    tags = processor.fast_load_dxfattribs(
        dxf, acdb_mline_group_codes, 2, log=False)
    self.load_vertices(tags)
    return dxf
def load_vertices(self, tags: Tags) -> None:
    """Append MLINE vertices loaded from `tags`.

    Group code 11 starts a new vertex record.
    """
    for vertex_tags in group_tags(tags, splitcode=11):
        self.vertices.append(MLineVertex.load(vertex_tags))
def preprocess_export(self, tagwriter: 'TagWriter') -> bool:
    """Return ``True`` if this MLINE should be exported.

    MLINE entities with fewer than two vertices are skipped.
    """
    # todo: check if line- and fill parametrization is compatible with
    # MLINE style, requires same count of elements!
    vertex_count = len(self.vertices)
    return vertex_count > 1
def export_entity(self, tagwriter: 'TagWriter') -> None:
    """Export MLINE DXF tags to `tagwriter`.

    Export order: base-class tags, AcDbMline subclass marker, DXF
    attributes, then the vertex records.
    """
    # ezdxf does not export MLINE entities without vertices,
    # see method preprocess_export(), therefore the HAS_VERTICES flag
    # can be set unconditionally here.
    self.set_flag_state(self.HAS_VERTICES, True)
    super().export_entity(tagwriter)
    tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_mline.name)
    self.dxf.export_dxf_attribs(tagwriter, acdb_mline.attribs.keys())
    self.export_vertices(tagwriter)
def export_vertices(self, tagwriter: 'TagWriter') -> None:
    """Write all MLINE vertex records to `tagwriter`."""
    for mline_vertex in self.vertices:
        mline_vertex.export_dxf(tagwriter)
@property
def is_closed(self) -> bool:
    """``True`` if the MLINE is closed (CLOSED flag is set).

    Compatibility interface to :class:`Polyline`.
    """
    closed_state = self.get_flag_state(self.CLOSED)
    return closed_state
def close(self, state: bool = True) -> None:
    """Set closed state of the MLINE and update geometry accordingly.

    Compatibility interface to :class:`Polyline`.
    """
    new_state = bool(state)
    if new_state == self.is_closed:
        return  # nothing to do, avoid a needless geometry update
    self.set_flag_state(self.CLOSED, new_state)
    self.update_geometry()
@property
def start_caps(self) -> bool:
    """ Get/Set start caps state. ``True`` to enable start caps and
    ``False`` to suppress start caps. """
    return not self.get_flag_state(self.SUPPRESS_START_CAPS)
@start_caps.setter
def start_caps(self, value: bool) -> None:
    """Enable start caps for a true `value`, otherwise suppress them."""
    suppress = not bool(value)
    self.set_flag_state(self.SUPPRESS_START_CAPS, suppress)
@property
def end_caps(self) -> bool:
    """ Get/Set end caps state. ``True`` to enable end caps and
    ``False`` to suppress end caps."""
    return not self.get_flag_state(self.SUPPRESS_END_CAPS)
@end_caps.setter
def end_caps(self, value: bool) -> None:
    """ Set end caps state. """
    self.set_flag_state(self.SUPPRESS_END_CAPS, not bool(value))
def set_scale_factor(self, value: float) -> None:
    """Set the scale factor and update geometry accordingly."""
    new_scale = float(value)
    if math.isclose(self.dxf.scale_factor, new_scale):
        return  # unchanged, skip geometry update
    self.dxf.scale_factor = new_scale
    self.update_geometry()
def set_justification(self, value: int) -> None:
    """Set MLINE justification and update geometry accordingly.

    See :attr:`dxf.justification` for valid settings.
    """
    new_justification = int(value)
    if self.dxf.justification == new_justification:
        return  # unchanged, skip geometry update
    self.dxf.justification = new_justification
    self.update_geometry()
@property
def style(self) -> Optional['MLineStyle']:
    """Associated MLINESTYLE or ``None`` for unbounded entities."""
    doc = self.doc
    if doc is None:
        return None
    # Prefer lookup by stored handle, fall back to lookup by name:
    style = doc.entitydb.get(self.dxf.style_handle)
    if style is not None:
        return style
    return doc.mline_styles.get(self.dxf.style_name)
def set_style(self, name: str) -> None:
    """ Set MLINESTYLE by name and update geometry accordingly.

    The MLINESTYLE definition must exist.

    Raises:
        DXFValueError: if no MLINESTYLE named `name` exists.
    """
    if self.doc is None:
        # Without a document there is no style table to resolve `name`:
        logger.debug("Can't change style of unbounded MLINE entity.")
        return
    try:
        style = self.doc.mline_styles.get(name)
    except const.DXFKeyError:
        raise const.DXFValueError(f'Undefined MLINE style: {name}')
    # Line- and fill parametrization depends on the count of
    # elements, a change in the number of elements triggers a
    # reset of the parametrization:
    old_style = self.style
    new_element_count = len(style.elements)
    reset = False
    if old_style:
        # Do not trust the stored "style_element_count" value
        reset = len(self.style.elements) != new_element_count
    self.dxf.style_name = name
    self.dxf.style_handle = style.dxf.handle
    self.dxf.style_element_count = new_element_count
    if reset:
        self.update_geometry()
def start_location(self) -> Vec3:
    """Start location of the reference line, NULLVEC if no vertices
    exist. Callback function for :attr:`dxf.start_location`.
    """
    return self.vertices[0].location if self.vertices else NULLVEC
def get_locations(self) -> List[Vec3]:
    """Return the vertices of the reference line as a new list."""
    locations = []
    for vertex in self.vertices:
        locations.append(vertex.location)
    return locations
def extend(self, vertices: Iterable['Vertex']) -> None:
    """Append multiple vertices to the reference line.

    It is possible to work with 3D vertices, but all vertices have to be
    in the same plane and the normal vector of this plane is stored as
    extrusion vector in the MLINE entity.
    """
    new_vertices = Vec3.list(vertices)
    if not new_vertices:
        return
    # Start from the existing reference line (if any) and regenerate
    # the whole geometry for the combined vertex sequence:
    reference_line = self.get_locations() if len(self) else []
    reference_line.extend(new_vertices)
    self.generate_geometry(reference_line)
def update_geometry(self) -> None:
    """Regenerate the MLINE geometry based on current settings."""
    current_reference_line = self.get_locations()
    self.generate_geometry(current_reference_line)
def generate_geometry(self, vertices: List[Vec3]) -> None:
    """ Regenerate the MLINE geometry for new reference line defined by
    `vertices`.

    Near-duplicate vertices (abs_tol=1e-6) are removed first. Zero
    remaining vertices clear the entity; a single vertex gets default
    line/miter directions (X_AXIS/Y_AXIS).

    Raises:
        DXFStructureError: if the associated MLINESTYLE defines no
            line elements.
    """
    vertices = list(filter_close_vertices(vertices, abs_tol=1e-6))
    if len(vertices) == 0:
        self.clear()
        return
    elif len(vertices) == 1:
        self.vertices = [MLineVertex.new(vertices[0], X_AXIS, Y_AXIS)]
        return
    style = self.style
    if len(style.elements) == 0:
        raise const.DXFStructureError(
            f'No line elements defined in {str(style)}.')

    def miter(dir1: Vec3, dir2: Vec3):
        # Miter direction: orthogonal to the average of both adjacent
        # segment directions.
        return ((dir1 + dir2) * 0.5).normalize().orthogonal()

    ucs = UCS.from_z_axis_and_point_in_xz(
        origin=vertices[0],
        point=vertices[1],
        axis=self.dxf.extrusion,
    )
    # Transform given vertices into UCS and project them into the
    # UCS-xy-plane by setting the z-axis to 0:
    vertices = [v.replace(z=0) for v in ucs.points_from_wcs(vertices)]
    start_angle = style.dxf.start_angle
    end_angle = style.dxf.end_angle
    # One normalized direction per segment of the reference line:
    line_directions = [
        (v2 - v1).normalize() for v1, v2 in
        zip(vertices, vertices[1:])
    ]
    if self.is_closed:
        # Closing segment points from the last vertex back to the first:
        line_directions.append((vertices[0] - vertices[-1]).normalize())
        closing_miter = miter(line_directions[0], line_directions[-1])
        miter_directions = [closing_miter]
    else:
        closing_miter = None
        # Repeat last direction so every vertex has a line direction:
        line_directions.append(line_directions[-1])
        # Start cap direction is the first segment direction rotated by
        # the style start angle:
        miter_directions = [line_directions[0].rotate_deg(start_angle)]
    for d1, d2 in zip(line_directions, line_directions[1:]):
        miter_directions.append(miter(d1, d2))
    if closing_miter is None:
        # Open MLINE: replace the last entry by the end cap direction,
        # rotated by the style end angle:
        miter_directions.pop()
        miter_directions.append(line_directions[-1].rotate_deg(end_angle))
    else:
        miter_directions.append(closing_miter)
    self.vertices = [
        MLineVertex.new(v, d, m)
        for v, d, m in zip(vertices, line_directions, miter_directions)
    ]
    self._update_parametrization()
    # reverse transformation into WCS
    for v in self.vertices:
        v.transform(ucs.matrix)
def _update_parametrization(self):
    # Rebuild the per-vertex line parametrization from the current
    # style, scale factor and justification; fill parametrization is
    # reset to empty tuples. (internal API)
    scale = self.dxf.scale_factor
    style = self.style
    justification = self.dxf.justification
    offsets = [e.offset for e in style.elements]
    min_offset = min(offsets)
    max_offset = max(offsets)
    # Shift all element offsets so the reference line matches the
    # requested justification:
    shift = 0
    if justification == self.TOP:
        shift = -max_offset
    elif justification == self.BOTTOM:
        shift = -min_offset
    for vertex in self.vertices:
        angle = vertex.line_direction.angle_between(vertex.miter_direction)
        try:
            # Distances along the miter direction are longer for
            # non-orthogonal miters; stretch compensates for that.
            stretch = scale / math.sin(angle)
        except ZeroDivisionError:
            # Degenerate miter (angle of 0): fall back to no stretch.
            stretch = 1.0
        vertex.line_params = [
            ((element.offset + shift) * stretch, 0.0) for element in
            style.elements
        ]
        vertex.fill_params = [tuple() for _ in style.elements]
def clear(self) -> None:
    """Remove all MLINE vertices."""
    # In-place removal keeps external references to the list valid.
    del self.vertices[:]
def remove_dependencies(self, other: 'Drawing' = None) -> None:
    """ Remove all dependencies from current document.

    If a target document `other` is given, try to rebind the style
    handle to the equally named MLINESTYLE of that document; otherwise
    fall back to the 'Standard' style name.

    (internal API)
    """
    if not self.is_alive:
        return
    super().remove_dependencies(other)
    # Invalidate the handle into the current document:
    self.dxf.style_handle = '0'
    if other:
        style = other.mline_styles.get(self.dxf.style_name)
        if style:
            self.dxf.style_handle = style.dxf.handle
        return
    self.dxf.style_name = 'Standard'
def transform(self, m: 'Matrix44') -> 'DXFGraphic':
    """ Transform MLINE entity by transformation matrix `m` inplace.

    All vertices and the extrusion vector are transformed; the scale
    factor is only updated for (near) uniform scaling, then the whole
    geometry is regenerated.
    """
    for vertex in self.vertices:
        vertex.transform(m)
    self.dxf.extrusion = m.transform_direction(self.dxf.extrusion)
    scale = self.dxf.scale_factor
    # Detect uniform scaling by transforming an "all equal" direction:
    scale_vec = m.transform_direction(Vec3(scale, scale, scale))
    if math.isclose(scale_vec.x, scale_vec.y, abs_tol=1e-6) and \
            math.isclose(scale_vec.y, scale_vec.z, abs_tol=1e-6):
        self.dxf.scale_factor = sum(scale_vec) / 3  # average error
    # Non-uniform scaling will not be applied to the scale_factor!
    self.update_geometry()
    return self
def virtual_entities(self) -> Iterable[DXFGraphic]:
    """Yield 'virtual' parts of MLINE as LINE, ARC and HATCH entities.

    These entities are located at the original positions, but are not
    stored in the entity database, have no handle and are not assigned
    to any layout.
    """
    # Local import avoids a cyclic module dependency.
    from ezdxf.render.mline import virtual_entities as _virtual_entities
    return _virtual_entities(self)
def explode(self, target_layout: 'BaseLayout' = None) -> 'EntityQuery':
    """Explode parts of MLINE as LINE, ARC and HATCH entities into
    `target_layout`; if `target_layout` is ``None``, the layout of the
    MLINE is used.

    Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF
    parts.

    Args:
        target_layout: target layout for DXF parts, ``None`` for same
            layout as source entity.
    """
    # Local import avoids a cyclic module dependency.
    from ezdxf.explode import explode_entity as _explode_entity
    return _explode_entity(self, target_layout)
def audit(self, auditor: 'Auditor') -> None:
    """ Validity check.

    Repairs an invalid MLINESTYLE handle/name binding and regenerates
    the geometry if any vertex is inconsistent with the current style.
    """

    def reset_mline_style(name='Standard'):
        # Report the fix and rebind style name and handle to `name`;
        # `doc` is taken from the enclosing scope at call time.
        auditor.fixed_error(
            code=AuditError.RESET_MLINE_STYLE,
            message=f'Reset MLINESTYLE to "{name}" in {str(self)}.',
            dxf_entity=self,
        )
        self.dxf.style_name = name
        style = doc.mline_styles.get(name)
        self.dxf.style_handle = style.dxf.handle

    super().audit(auditor)
    doc = auditor.doc
    if doc is None:
        # Unbounded entity: no document to validate against.
        return

    # Audit associated MLINESTYLE name and handle:
    style = doc.entitydb.get(self.dxf.style_handle)
    if style is None:  # handle is invalid, get style by name
        style = doc.mline_styles.get(self.dxf.style_name, None)
        if style is None:
            reset_mline_style()
        else:  # fix MLINESTYLE handle:
            auditor.fixed_error(
                code=AuditError.INVALID_MLINESTYLE_HANDLE,
                message=f'Fixed invalid style handle in {str(self)}.',
                dxf_entity=self,
            )
            self.dxf.style_handle = style.dxf.handle
    else:  # update MLINESTYLE name silently
        self.dxf.style_name = style.dxf.name

    # Get current (maybe fixed) MLINESTYLE:
    style = self.style
    # Update style element count silently:
    element_count = len(style.elements)
    self.dxf.style_element_count = element_count

    # Audit vertices: any zero line/miter direction or a line_params
    # count that differs from the style element count marks the
    # vertices as invalid.
    for vertex in self.vertices:
        if NULLVEC.isclose(vertex.line_direction):
            break
        if NULLVEC.isclose(vertex.miter_direction):
            break
        if len(vertex.line_params) != element_count:
            break
        # Ignore fill parameters.
    else:  # no break
        return

    # Invalid vertices found:
    auditor.fixed_error(
        code=AuditError.INVALID_MLINE_VERTEX,
        message=f'Execute geometry update for {str(self)}.',
        dxf_entity=self,
    )
    self.update_geometry()
acdb_mline_style = DefSubclass('AcDbMlineStyle', {
'name': DXFAttr(2, default='Standard'),
# Flags (bit-coded):
# 1 =Fill on
# 2 = Display miters
# 16 = Start square end (line) cap
# 32 = Start inner arcs cap
# 64 = Start round (outer arcs) cap
| |
0."
# ================================ Line Weight ================================
try:
if float(values_dict['lineWeight']) <= 0:
error_msg_dict['lineWeight'] = u"The line weight value must be greater than zero."
except ValueError:
error_msg_dict['lineWeight'] = u"The line weight value must be a real number."
if len(error_msg_dict) > 0:
return False, values_dict, error_msg_dict
else:
# TODO: consider adding this feature to DLFramework and including in all plugins.
# ============================== Log All Changes ==============================
# Log any changes to the plugin preferences.
changed_keys = ()
config_changed = False
for key in values_dict.keys():
try:
if values_dict[key] != self.pluginPrefs[key]:
config_changed = True
changed_keys += (u"{k}".format(k=key),
u"Old: {k}".format(k=self.pluginPrefs[key]),
u"New: {k}".format(k=values_dict[key]),)
# Missing keys will be config dialog format props like labels and separators
except KeyError:
pass
if config_changed:
self.logger.threaddebug(u"values_dict changed: {ck}".format(ck=changed_keys))
values_dict['dpiWarningFlag'] = True
self.logger.threaddebug(u"Preferences validated successfully.")
return True, values_dict
# =============================================================================
def validateDeviceConfigUi(self, values_dict=None, type_id="", dev_id=0):
error_msg_dict = indigo.Dict()
self.logger.threaddebug(u"Validating device configuration parameters.")
# ================================ Area Chart =================================
if type_id == 'areaChartingDevice':
# There must be at least 1 source selected
if values_dict['area1Source'] == 'None':
error_msg_dict['area1Source'] = u"You must select at least one data source."
values_dict['settingsGroup'] = "1"
# Iterate for each area group (1-8).
for area in range(1, 9, 1):
# Line adjustment values
for char in values_dict['area{i}adjuster'.format(i=area)]:
if char not in ' +-/*.0123456789': # allowable numeric specifiers
error_msg_dict['area{i}adjuster'.format(i=area)] = u"Valid operators are +, -, *, /"
values_dict['settingsGroup'] = str(area)
# =============================== Custom Ticks ================================
# Ensure all custom tick locations are numeric, within bounds and of the same length.
if values_dict['customTicksY'].lower() not in ("", 'none'):
custom_ticks = values_dict['customTicksY'].replace(' ', '')
custom_ticks = custom_ticks.split(',')
custom_tick_labels = values_dict['customTicksLabelY'].split(',')
default_y_axis = (values_dict['yAxisMin'], values_dict['yAxisMax'])
default_y_axis = [x.lower() == 'none' for x in default_y_axis]
try:
custom_ticks = [float(_) for _ in custom_ticks]
except ValueError:
error_msg_dict['customTicksY'] = u"All custom tick locations must be numeric values."
values_dict['settingsGroup'] = "y"
# Ensure tick labels and values are the same length.
if len(custom_tick_labels) != len(custom_ticks):
error_msg_dict['customTicksY'] = u"Custom tick labels and custom tick locations must be the " \
u"same length."
error_msg_dict['customTicksLabelY'] = u"Custom tick labels and custom tick locations must be the " \
u"same length."
values_dict['settingsGroup'] = "y"
# Ensure all custom Y tick locations are within bounds. User has elected to
# change at least one Y axis boundary (if both upper and lower bounds are set
# to 'None', we move on).
if not all(default_y_axis):
for tick in custom_ticks:
if values_dict['yAxisMin'].lower() != 'none' and not tick >= float(values_dict['yAxisMin']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
if values_dict['yAxisMax'].lower() != 'none' and not tick <= float(values_dict['yAxisMax']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
# ================================ Flow Bar =================================
if type_id == 'barChartingDevice':
# Must select at least one source (bar 1)
if values_dict['bar1Source'] == 'None':
error_msg_dict['bar1Source'] = u"You must select at least one data source."
values_dict['barLabel1'] = True
values_dict['settingsGroup'] = "1"
try:
# Bar width must be greater than 0. Will also trap strings.
if not float(values_dict['barWidth']) >= 0:
raise ValueError
except ValueError:
error_msg_dict['barWidth'] = u"You must enter a bar width greater than 0."
values_dict['settingsGroup'] = "ch"
# =============================== Custom Ticks ================================
# Ensure all custom tick locations are numeric, within bounds and of the same length.
if values_dict['customTicksY'].lower() not in ("", 'none'):
custom_ticks = values_dict['customTicksY'].replace(' ', '')
custom_ticks = custom_ticks.split(',')
custom_tick_labels = values_dict['customTicksLabelY'].split(',')
default_y_axis = (values_dict['yAxisMin'], values_dict['yAxisMax'])
default_y_axis = [x.lower() == 'none' for x in default_y_axis]
try:
custom_ticks = [float(_) for _ in custom_ticks]
except ValueError:
error_msg_dict['customTicksY'] = u"All custom tick locations must be numeric values."
values_dict['settingsGroup'] = "y"
# Ensure tick labels and values are the same length.
if len(custom_tick_labels) != len(custom_ticks):
error_msg_dict['customTicksLabelY'] = u"Custom tick labels and locations must be the same length."
error_msg_dict['customTicksY'] = u"Custom tick labels and locations must be the same length."
values_dict['settingsGroup'] = "y"
# Ensure all custom Y tick locations are within bounds. User has elected to
# change at least one Y axis boundary (if both upper and lower bounds are set
# to 'None', we move on).
if not all(default_y_axis):
for tick in custom_ticks:
# Ensure all custom tick locations are within bounds.
if values_dict['yAxisMin'].lower() != 'none' and not tick >= float(values_dict['yAxisMin']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
if values_dict['yAxisMax'].lower() != 'none' and not tick <= float(values_dict['yAxisMax']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
# ================================ Stock Bar ================================
if type_id == 'barStockChartingDevice':
# Must select at least one source (bar 1)
if values_dict['bar1Source'] == 'None':
error_msg_dict['bar1Source'] = u"You must select at least one data source."
values_dict['settingsGroup'] = "1"
try:
# Bar width must be greater than 0. Will also trap strings.
if not float(values_dict['barWidth']) >= 0:
raise ValueError
except ValueError:
error_msg_dict['barWidth'] = u"You must enter a bar width greater than 0."
values_dict['settingsGroup'] = "ch"
# =============================== Custom Ticks ================================
# Ensure all custom tick locations are numeric, within bounds and of the same length.
if values_dict['customTicksY'].lower() not in ("", 'none'):
custom_ticks = values_dict['customTicksY'].replace(' ', '')
custom_ticks = custom_ticks.split(',')
custom_tick_labels = values_dict['customTicksLabelY'].split(',')
default_y_axis = (values_dict['yAxisMin'], values_dict['yAxisMax'])
default_y_axis = [x.lower() == 'none' for x in default_y_axis]
try:
custom_ticks = [float(_) for _ in custom_ticks]
except ValueError:
error_msg_dict['customTicksY'] = u"All custom tick locations must be numeric values."
values_dict['settingsGroup'] = "y"
# Ensure tick labels and values are the same length.
if len(custom_tick_labels) != len(custom_ticks):
error_msg_dict['customTicksLabelY'] = u"Custom tick labels and locations must be the same length."
error_msg_dict['customTicksY'] = u"Custom tick labels and locations must be the same length."
values_dict['settingsGroup'] = "y"
# Ensure all custom Y tick locations are within bounds. User has elected to
# change at least one Y axis boundary (if both upper and lower bounds are set
# to 'None', we move on).
if not all(default_y_axis):
for tick in custom_ticks:
# Ensure all custom tick locations are within bounds.
if values_dict['yAxisMin'].lower() != 'none' and not tick >= float(values_dict['yAxisMin']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
if values_dict['yAxisMax'].lower() != 'none' and not tick <= float(values_dict['yAxisMax']):
error_msg_dict['customTicksY'] = u"All custom tick locations must be within the " \
u"boundaries of the Y axis."
values_dict['settingsGroup'] = "y"
# Test the selected values to ensure that they can be charted (int, float, bool)
for source in ['bar1Value', 'bar2Value', 'bar3Value', 'bar4Value', 'bar5Value']:
# Pull the number out of the source key
n = re.search('[0-9]', source)
# Get the id of the bar source
if values_dict['bar{0}Source'.format(n.group(0))] != "None":
source_id = int(values_dict['bar{0}Source'.format(n.group(0))])
# By definition it will either be a device ID or a variable ID.
if source_id in indigo.devices.keys():
# Get the selected device state value
val = indigo.devices[source_id].states[values_dict[source]]
if not isinstance(val, (int, float, bool)):
error_msg_dict[source] = u"The selected device state can not be charted due to its value."
else:
val = indigo.variables[source_id].value
try:
float(val)
except ValueError:
if not val.lower() in ['true', 'false']:
error_msg_dict[source] = u"The selected variable value can not be charted due to " \
u"its value."
values_dict['settingsGroup'] = str(n)
# ========================== Stock Horizontal Bar ===========================
if type_id == 'barStockHorizontalChartingDevice':
# Must select at least one source (bar 1)
if values_dict['bar1Source'] == 'None':
error_msg_dict['bar1Source'] = u"You must select at least one data source."
values_dict['settingsGroup'] = "1"
try:
# Bar width must be greater than 0. Will also trap strings.
if not float(values_dict['barWidth']) >= 0:
raise ValueError
except ValueError:
error_msg_dict['barWidth'] = u"You must enter a bar width greater than 0."
values_dict['settingsGroup'] = "ch"
# =============================== Custom Ticks ================================
# Ensure all custom tick locations are numeric, within bounds and of the same length.
if values_dict['customTicksY'].lower() not in ("", 'none'):
custom_ticks = values_dict['customTicksY'].replace(' ', '')
custom_ticks = custom_ticks.split(',')
custom_tick_labels = values_dict['customTicksLabelY'].split(',')
default_y_axis = (values_dict['yAxisMin'], values_dict['yAxisMax'])
default_y_axis | |
Key.KEY_DELETE_FORWARD: libevdev.EV_KEY.KEY_DELETE.value,
Key.KEY_END: libevdev.EV_KEY.KEY_END.value,
Key.KEY_PAGEDOWN: libevdev.EV_KEY.KEY_PAGEDOWN.value,
Key.KEY_RIGHTARROW: libevdev.EV_KEY.KEY_RIGHT.value,
Key.KEY_LEFTARROW: libevdev.EV_KEY.KEY_LEFT.value,
Key.KEY_DOWNARROW: libevdev.EV_KEY.KEY_DOWN.value,
Key.KEY_UPARROW: libevdev.EV_KEY.KEY_UP.value,
Key.KEY_KEYPAD_NUM_LOCK_AND_CLEAR: libevdev.EV_KEY.KEY_NUMLOCK.value,
Key.KEY_KEYPAD_SLASH: libevdev.EV_KEY.KEY_KPSLASH.value,
Key.KEY_KEYPAD_ASTERISK: libevdev.EV_KEY.KEY_KPASTERISK.value,
Key.KEY_KEYPAD_MINUS: libevdev.EV_KEY.KEY_KPMINUS.value,
Key.KEY_KEYPAD_PLUS: libevdev.EV_KEY.KEY_KPPLUS.value,
Key.KEY_KEYPAD_ENTER: libevdev.EV_KEY.KEY_KPENTER.value,
Key.KEY_KEYPAD_1_AND_END: libevdev.EV_KEY.KEY_KP1.value,
Key.KEY_KEYPAD_2_AND_DOWN_ARROW: libevdev.EV_KEY.KEY_KP2.value,
Key.KEY_KEYPAD_3_AND_PAGEDN: libevdev.EV_KEY.KEY_KP3.value,
Key.KEY_KEYPAD_4_AND_LEFT_ARROW: libevdev.EV_KEY.KEY_KP4.value,
Key.KEY_KEYPAD_5: libevdev.EV_KEY.KEY_KP5.value,
Key.KEY_KEYPAD_6_AND_RIGHT_ARROW: libevdev.EV_KEY.KEY_KP6.value,
Key.KEY_KEYPAD_7_AND_HOME: libevdev.EV_KEY.KEY_KP7.value,
Key.KEY_KEYPAD_8_AND_UP_ARROW: libevdev.EV_KEY.KEY_KP8.value,
Key.KEY_KEYPAD_9_AND_PAGEUP: libevdev.EV_KEY.KEY_KP9.value,
Key.KEY_KEYPAD_0_AND_INSERT: libevdev.EV_KEY.KEY_KP0.value,
Key.KEY_KEYPAD_PERIOD_AND_DELETE: libevdev.EV_KEY.KEY_KPDOT.value,
Key.KEY_NON_US_BACKSLASH_AND_PIPE: libevdev.EV_KEY.KEY_102ND.value,
Key.KEY_APPLICATION: libevdev.EV_KEY.KEY_COMPOSE.value,
Key.KEY_POWER: libevdev.EV_KEY.KEY_POWER.value,
Key.KEY_KEYPAD_EQUAL: libevdev.EV_KEY.KEY_KPEQUAL.value,
Key.KEY_F13: libevdev.EV_KEY.KEY_F13.value,
Key.KEY_F14: libevdev.EV_KEY.KEY_F14.value,
Key.KEY_F15: libevdev.EV_KEY.KEY_F15.value,
Key.KEY_F16: libevdev.EV_KEY.KEY_F16.value,
Key.KEY_F17: libevdev.EV_KEY.KEY_F17.value,
Key.KEY_F18: libevdev.EV_KEY.KEY_F18.value,
Key.KEY_F19: libevdev.EV_KEY.KEY_F19.value,
Key.KEY_F20: libevdev.EV_KEY.KEY_F20.value,
Key.KEY_F21: libevdev.EV_KEY.KEY_F21.value,
Key.KEY_F22: libevdev.EV_KEY.KEY_F22.value,
Key.KEY_F23: libevdev.EV_KEY.KEY_F23.value,
Key.KEY_F24: libevdev.EV_KEY.KEY_F24.value,
Key.KEY_EXECUTE: 0,
Key.KEY_HELP: libevdev.EV_KEY.KEY_HELP.value,
Key.KEY_MENU: libevdev.EV_KEY.KEY_MENU.value,
Key.KEY_SELECT: libevdev.EV_KEY.KEY_SELECT.value,
Key.KEY_STOP: libevdev.EV_KEY.KEY_STOP.value,
Key.KEY_AGAIN: libevdev.EV_KEY.KEY_AGAIN.value,
Key.KEY_UNDO: libevdev.EV_KEY.KEY_UNDO.value,
Key.KEY_CUT: libevdev.EV_KEY.KEY_CUT.value,
Key.KEY_COPY: libevdev.EV_KEY.KEY_COPY.value,
Key.KEY_PASTE: libevdev.EV_KEY.KEY_PASTE.value,
Key.KEY_FIND: libevdev.EV_KEY.KEY_FIND.value,
Key.KEY_MUTE: libevdev.EV_KEY.KEY_MUTE.value,
Key.KEY_VOLUME_UP: libevdev.EV_KEY.KEY_VOLUMEUP.value,
Key.KEY_VOLUME_DOWN: libevdev.EV_KEY.KEY_VOLUMEDOWN.value,
Key.KEY_LOCKING_CAPS_LOCK: 0,
Key.KEY_LOCKING_NUM_LOCK: 0,
Key.KEY_LOCKING_SCROLL_LOCK: 0,
Key.KEY_KEYPAD_COMMA: libevdev.EV_KEY.KEY_KPCOMMA.value,
Key.KEY_KEYPAD_EQUAL_SIGN: libevdev.EV_KEY.KEY_KPEQUAL.value,
Key.KEY_KANJI1: 0,
Key.KEY_KANJI2: 0,
Key.KEY_KANJI3: 0,
Key.KEY_KANJI4: 0,
Key.KEY_KANJI5: 0,
Key.KEY_KANJI6: 0,
Key.KEY_KANJI7: 0,
Key.KEY_KANJI8: 0,
Key.KEY_KANJI9: 0,
Key.KEY_LANG1: 0,
Key.KEY_LANG2: 0,
Key.KEY_LANG3: 0,
Key.KEY_LANG4: 0,
Key.KEY_LANG5: 0,
Key.KEY_LANG6: 0,
Key.KEY_LANG7: 0,
Key.KEY_LANG8: 0,
Key.KEY_LANG9: 0,
Key.KEY_ALTERNATE_ERASE: 0,
Key.KEY_SYSREQ_ATTENTION: libevdev.EV_KEY.KEY_SYSRQ.value,
Key.KEY_CANCEL: libevdev.EV_KEY.KEY_CANCEL.value,
Key.KEY_CLEAR: libevdev.EV_KEY.KEY_CLEAR.value,
Key.KEY_PRIOR: 0,
Key.KEY_RETURN: 0,
Key.KEY_SEPARATOR: 0,
Key.KEY_OUT: 0,
Key.KEY_OPER: 0,
Key.KEY_CLEAR_AGAIN: 0,
Key.KEY_CRSEL_PROPS: 0,
Key.KEY_EXSEL: 0,
# [xA5 ... 0xDF] = 0,
Key.KEY_LEFTCONTROL: libevdev.EV_KEY.KEY_LEFTCTRL.value,
Key.KEY_LEFTSHIFT: libevdev.EV_KEY.KEY_LEFTSHIFT.value,
Key.KEY_LEFTALT: libevdev.EV_KEY.KEY_LEFTALT.value,
Key.KEY_LEFT_GUI: libevdev.EV_KEY.KEY_LEFTMETA.value,
Key.KEY_RIGHTCONTROL: libevdev.EV_KEY.KEY_RIGHTCTRL.value,
Key.KEY_RIGHTSHIFT: libevdev.EV_KEY.KEY_RIGHTSHIFT.value,
Key.KEY_RIGHTALT: libevdev.EV_KEY.KEY_RIGHTALT.value,
Key.KEY_RIGHT_GUI: libevdev.EV_KEY.KEY_RIGHTMETA.value,
# [0xe8 ... 0xff] = 0,
}
class ConsumerControl(enum.IntEnum):
CC_CONSUMER_CONTROL = 0x01
CC_NUMERIC_KEY_PAD = 0x02
CC_PROGRAMMABLE_BUTTONS = 0x03
CC_MICROPHONE = 0x04
CC_HEADPHONE = 0x05
CC_GRAPHIC_EQUALIZER = 0x06
CC_PLUS_10 = 0x20
CC_PLUS_100 = 0x21
CC_AM_PM = 0x22
CC_POWER = 0x30
CC_RESET = 0x31
CC_SLEEP = 0x32
CC_SLEEP_AFTER = 0x33
CC_SLEEP_MODE = 0x34
CC_ILLUMINATION = 0x35
CC_FUNCTION_BUTTONS = 0x36
CC_MENU = 0x40
CC_MENU_PICK = 0x41
CC_MENU_UP = 0x42
CC_MENU_DOWN = 0x43
CC_MENU_LEFT = 0x44
CC_MENU_RIGHT = 0x45
CC_MENU_ESCAPE = 0x46
CC_MENU_VALUE_INCREASE = 0x47
CC_MENU_VALUE_DECREASE = 0x48
CC_DATA_ON_SCREEN = 0x60
CC_CLOSED_CAPTION = 0x61
CC_CLOSED_CAPTION_SELECT = 0x62
CC_VCR_TV = 0x63
CC_BROADCAST_MODE = 0x64
CC_SNAPSHOT = 0x65
CC_STILL = 0x66
CC_ASPECT = 0x6D
CC_3D_MODE_SELECT = 0x6E
CC_DISPLAY_BRIGHTNESS_INCREMENT = 0x6F
CC_DISPLAY_BRIGHTNESS_DECREMENT = 0x70
CC_DISPLAY_BRIGHTNESS = 0x71
CC_DISPLAY_BACKLIGHT_TOGGLE = 0x72
# CC_DISPLAY_SET_BRIGHTNESS_TO_MINIMUM 0x73
# CC_DISPLAY_SET_BRIGHTNESS_TO_MAXIMUM 0x74
CC_DISPLAY_SET_AUTO_BRIGHTNESS = 0x75
CC_SELECTION = 0x80
CC_ASSIGN_SELECTION = 0x81
CC_MODE_STEP = 0x82
CC_RECALL_LAST = 0x83
CC_ENTER_CHANNEL = 0x84
CC_ORDER_MOVIE = 0x85
CC_CHANNEL = 0x86
CC_MEDIA_SELECTION = 0x87
CC_MEDIA_SELECT_COMPUTER = 0x88
CC_MEDIA_SELECT_TV = 0x89
CC_MEDIA_SELECT_WWW = 0x8A
CC_MEDIA_SELECT_DVD = 0x8B
CC_MEDIA_SELECT_TELEPHONE = 0x8C
CC_MEDIA_SELECT_PROGRAM_GUIDE = 0x8D
CC_MEDIA_SELECT_VIDEO_PHONE = 0x8E
CC_MEDIA_SELECT_GAMES = 0x8F
CC_MEDIA_SELECT_MESSAGES = 0x90
CC_MEDIA_SELECT_CD = 0x91
CC_MEDIA_SELECT_VCR = 0x92
CC_MEDIA_SELECT_TUNER = 0x93
CC_QUIT = 0x94
CC_HELP = 0x95
CC_MEDIA_SELECT_TAPE = 0x96
CC_MEDIA_SELECT_CABLE = 0x97
CC_MEDIA_SELECT_SATELLITE = 0x98
CC_MEDIA_SELECT_SECURITY = 0x99
CC_MEDIA_SELECT_HOME = 0x9A
CC_MEDIA_SELECT_CALL = 0x9B
CC_CHANNEL_INCREMENT = 0x9C
CC_CHANNEL_DECREMENT = 0x9D
CC_MEDIA_SELECT_SAP = 0x9E
CC_VCR_PLUS = 0xA0
CC_ONCE = 0xA1
CC_DAILY = 0xA2
CC_WEEKLY = 0xA3
CC_MONTHLY = 0xA4
CC_PLAY = 0xB0
CC_PAUSE = 0xB1
CC_RECORD = 0xB2
CC_FAST_FORWARD = 0xB3
CC_REWIND = 0xB4
CC_SCAN_NEXT_TRACK = 0xB5
CC_SCAN_PREVIOUS_TRACK = 0xB6
CC_STOP = 0xB7
CC_EJECT = 0xB8
CC_RANDOM_PLAY = 0xB9
CC_SELECT_DISC = 0xBA
CC_ENTER_DISC = 0xBB
CC_REPEAT = 0xBC
CC_TRACKING = 0xBD
CC_TRACK_NORMAL = 0xBE
CC_SLOW_TRACKING = 0xBF
CC_FRAME_FORWARD = 0xC0
CC_FRAME_BACK = 0xC1
CC_MARK = 0xC2
CC_CLEAR_MARK = 0xC3
CC_REPEAT_FROM_MARK = 0xC4
CC_RETURN_TO_MARK = 0xC5
CC_SEARCH_MARK_FORWARD = 0xC6
CC_SEARCH_MARK_BACKWARDS = 0xC7
CC_COUNTER_RESET = 0xC8
CC_SHOW_COUNTER = 0xC9
CC_TRACKING_INCREMENT = 0xCA
CC_TRACKING_DECREMENT = 0xCB
CC_STOP_EJECT = 0xCC
CC_PLAY_PAUSE = 0xCD
CC_PLAY_SKIP = 0xCE
CC_VOICE_COMMAND = 0xCF
CC_VOLUME = 0xE0
CC_BALANCE = 0xE1
CC_MUTE = 0xE2
CC_BASS = 0xE3
CC_TREBLE = 0xE4
CC_BASS_BOOST = 0xE5
CC_SURROUND_MODE = 0xE6
CC_LOUDNESS = 0xE7
CC_MPX = 0xE8
CC_VOLUME_UP = 0xE9
CC_VOLUME_DOWN = 0xEA
CC_SPEED_SELECT = 0xF0
CC_PLAYBACK_SPEED = 0xF1
CC_STANDARD_PLAY = 0xF2
CC_LONG_PLAY = 0xF3
CC_EXTENDED_PLAY = 0xF4
CC_SLOW = 0xF5
CC_FAN_ENABLE = 0x100
CC_FAN_SPEED = 0x101
CC_LIGHT_ENABLE = 0x102
CC_LIGHT_ILLUMINATION_LEVEL = 0x103
CC_CLIMATE_CONTROL_ENABLE = 0x104
CC_ROOM_TEMPERATURE = 0x105
CC_SECURITY_ENABLE = 0x106
CC_FIRE_ALARM = 0x107
CC_POLICE_ALARM = 0x108
CC_PROXIMITY = 0x109
CC_MOTION = 0x10A
CC_DURESS_ALARM = 0x10B
CC_HOLDUP_ALARM = 0x10C
CC_MEDICAL_ALARM = 0x10D
CC_BALANCE_RIGHT = 0x150
CC_BALANCE_LEFT = 0x151
CC_BASS_INCREMENT = 0x152
CC_BASS_DECREMENT = 0x153
CC_TREBLE_INCREMENT = 0x154
CC_TREBLE_DECREMENT = 0x155
CC_SPEAKER_SYSTEM = 0x160
CC_CHANNEL_LEFT = 0x161
CC_CHANNEL_RIGHT = 0x162
CC_CHANNEL_CENTER = 0x163
CC_CHANNEL_FRONT = 0x164
CC_CHANNEL_CENTER_FRONT = 0x165
CC_CHANNEL_SIDE = 0x166
CC_CHANNEL_SURROUND = 0x167
CC_CHANNEL_LOW_FREQ_ENHANCEMENT = 0x168
CC_CHANNEL_TOP = 0x169
CC_CHANNEL_UNKNOWN = 0x16A
CC_SUB_CHANNEL = 0x170
CC_SUB_CHANNEL_INCREMENT = 0x171
CC_SUB_CHANNEL_DECREMENT = 0x172
CC_ALTERNATE_AUDIO_INCREMENT = 0x173
CC_ALTERNATE_AUDIO_DECREMENT = 0x174
CC_APPLICATION_LAUNCH_BUTTONS = 0x180
CC_AL_LAUNCH_BUTTON_CONFIG_TOOL = 0x181
CC_AL_PROGRAMMABLE_BUTTON_CONFIG = 0x182
CC_AL_CONSUMER_CONTROL_CONFIG = 0x183
CC_AL_WORD_PROCESSOR = 0x184
CC_AL_TEXT_EDITOR = 0x185
CC_AL_SPREADSHEET = 0x186
CC_AL_GRAPHICS_EDITOR = 0x187
CC_AL_PRESENTATION_APP = 0x188
CC_AL_DATABASE_APP = 0x189
CC_AL_EMAIL_READER = 0x18A
CC_AL_NEWSREADER = 0x18B
CC_AL_VOICEMAIL = 0x18C
CC_AL_CONTACTS_ADDRESS_BOOK = 0x18D
CC_AL_CALENDAR_SCHEDULE = 0x18E
CC_AL_TASK_PROJECT_MANAGER = 0x18F
CC_AL_LOG_JOURNAL_TIMECARD = 0x190
CC_AL_CHECKBOOK_FINANCE = 0x191
CC_AL_CALCULATOR = 0x192
CC_AL_A_VCAPTURE_PLAYBACK = 0x193
CC_AL_LOCAL_MACHINE_BROWSER = 0x194
CC_AL_LAN_WANBROWSER = 0x195
CC_AL_INTERNET_BROWSER = 0x196
CC_AL_REMOTE_NETWORKING_ISPCONNECT = 0x197
CC_AL_NETWORK_CONFERENCE = 0x198
CC_AL_NETWORK_CHAT = 0x199
CC_AL_TELEPHONY_DIALER = 0x19A
CC_AL_LOGON = 0x19B
CC_AL_LOGOFF = 0x19C
CC_AL_LOGON_LOGOFF = 0x19D
CC_AL_TERMINAL_LOCK_SCREENSAVER = 0x19E
CC_AL_CONTROL_PANEL = 0x19F
CC_AL_COMMAND_LINE_PROCESSOR_RUN = 0x1A0
CC_AL_PROCESS_TASK_MANAGER = 0x1A1
CC_AL_SELECT_TASK_APPLICATION = 0x1A2
CC_AL_NEXT_TASK_APPLICATION = 0x1A3
CC_AL_PREVIOUS_TASK_APPLICATION = 0x1A4
CC_AL_PREEMPT_HALT_TASK_APPLICATION = 0x1A5
CC_AL_INTEGRATED_HELP_CENTER = 0x1A6
CC_AL_DOCUMENTS = 0x1A7
CC_AL_THESAURUS = 0x1A8
CC_AL_DICTIONARY = 0x1A9
CC_AL_DESKTOP = 0x1AA
CC_AL_SPELL_CHECK = 0x1AB
CC_AL_GRAMMAR_CHECK = 0x1AC
CC_AL_WIRELESS_STATUS = 0x1AD
CC_AL_KEYBOARD_LAYOUT = 0x1AE
CC_AL_VIRUS_PROTECTION = 0x1AF
CC_AL_ENCRYPTION = 0x1B0
CC_AL_SCREEN_SAVER = 0x1B1
CC_AL_ALARMS = 0x1B2
CC_AL_CLOCK = 0x1B3
CC_AL_FILE_BROWSER = 0x1B4
CC_AL_POWER_STATUS = 0x1B5
CC_AL_IMAGE_BROWSER = 0x1B6
CC_AL_AUDIO_BROWSER = 0x1B7
CC_AL_MOVIE_BROWSER = 0x1B8
CC_AL_DIGITAL_RIGHTS_MANAGER = 0x1B9
CC_AL_DIGITAL_WALLET = 0x1BA
CC_AL_INSTANT_MESSAGING = 0x1BC
CC_AL_OEMFEATURES_TIPS_TUTO_BROWSER = 0x1BD
CC_AL_OEMHELP = 0x1BE
CC_AL_ONLINE_COMMUNITY = 0x1BF
CC_AL_ENTERTAINMENT_CONTENT_BROWSER = 0x1C0
CC_AL_ONLINE_SHOPPING_BROWSER = 0x1C1
CC_AL_SMART_CARD_INFORMATION_HELP = 0x1C2
# CC_AL_MARKET_MONITOR_FINANCE_BROWSER 0x1C3
CC_AL_CUSTOMIZED_CORP_NEWS_BROWSER = 0x1C4
CC_AL_ONLINE_ACTIVITY_BROWSER = 0x1C5
CC_AL_RESEARCH_SEARCH_BROWSER = 0x1C6
CC_AL_AUDIO_PLAYER = 0x1C7
CC_GENERIC_GUIAPPLICATION_CONTROLS = 0x200
CC_AC_NEW = 0x201
CC_AC_OPEN = 0x202
CC_AC_CLOSE = 0x203
CC_AC_EXIT = 0x204
CC_AC_MAXIMIZE = 0x205
CC_AC_MINIMIZE = 0x206
CC_AC_SAVE = 0x207
CC_AC_PRINT = 0x208
CC_AC_PROPERTIES = 0x209
CC_AC_UNDO = 0x21A
CC_AC_COPY = 0x21B
CC_AC_CUT = 0x21C
CC_AC_PASTE = 0x21D
CC_AC_SELECT_ALL = 0x21E
CC_AC_FIND = 0x21F
CC_AC_FINDAND_REPLACE = 0x220
CC_AC_SEARCH = 0x221
CC_AC_GO_TO = 0x222
CC_AC_HOME = 0x223
CC_AC_BACK = 0x224
CC_AC_FORWARD = 0x225
CC_AC_STOP = 0x226
CC_AC_REFRESH = 0x227
CC_AC_PREVIOUS_LINK = 0x228
CC_AC_NEXT_LINK = 0x229
CC_AC_BOOKMARKS = 0x22A
CC_AC_HISTORY = 0x22B
CC_AC_SUBSCRIPTIONS = 0x22C
CC_AC_ZOOM_IN = 0x22D
CC_AC_ZOOM_OUT = 0x22E
CC_AC_ZOOM = 0x22F
CC_AC_FULL_SCREEN_VIEW = 0x230
CC_AC_NORMAL_VIEW = 0x231
CC_AC_VIEW_TOGGLE = 0x232
CC_AC_SCROLL_UP = 0x233
CC_AC_SCROLL_DOWN = 0x234
CC_AC_SCROLL = 0x235
CC_AC_PAN_LEFT = 0x236
CC_AC_PAN_RIGHT = 0x237
CC_AC_PAN = 0x238
CC_AC_NEW_WINDOW = 0x239
CC_AC_TILE_HORIZONTALLY = 0x23A
CC_AC_TILE_VERTICALLY = 0x23B
CC_AC_FORMAT = 0x23C
CC_AC_EDIT = 0x23D
CC_AC_BOLD = 0x23E
CC_AC_ITALICS = 0x23F
CC_AC_UNDERLINE = 0x240
CC_AC_STRIKETHROUGH = 0x241
CC_AC_SUBSCRIPT = 0x242
CC_AC_SUPERSCRIPT = 0x243
CC_AC_ALL_CAPS = 0x244
CC_AC_ROTATE = 0x245
CC_AC_RESIZE = 0x246
CC_AC_FLIPHORIZONTAL = 0x247
CC_AC_FLIP_VERTICAL = 0x248
CC_AC_MIRROR_HORIZONTAL = 0x249
CC_AC_MIRROR_VERTICAL = 0x24A
CC_AC_FONT_SELECT = 0x24B
CC_AC_FONT_COLOR = 0x24C
CC_AC_FONT_SIZE = 0x24D
CC_AC_JUSTIFY_LEFT = 0x24E
CC_AC_JUSTIFY_CENTER_H = 0x24F
CC_AC_JUSTIFY_RIGHT = 0x250
CC_AC_JUSTIFY_BLOCK_H = 0x251
CC_AC_JUSTIFY_TOP = 0x252
CC_AC_JUSTIFY_CENTER_V = 0x253
CC_AC_JUSTIFY_BOTTOM = 0x254
CC_AC_JUSTIFY_BLOCK_V = 0x255
CC_AC_INDENT_DECREASE = 0x256
CC_AC_INDENT_INCREASE = 0x257
CC_AC_NUMBERED_LIST = 0x258
CC_AC_RESTART_NUMBERING = 0x259
CC_AC_BULLETED_LIST = 0x25A
CC_AC_PROMOTE = 0x25B
CC_AC_DEMOTE = 0x25C
CC_AC_YES = 0x25D
CC_AC_NO = 0x25E
CC_AC_CANCEL = 0x25F
CC_AC_CATALOG = 0x260
CC_AC_BUY_CHECKOUT = 0x261
CC_AC_ADDTO_CART = 0x262
CC_AC_EXPAND = 0x263
CC_AC_EXPAND_ALL = 0x264
CC_AC_COLLAPSE = 0x265
CC_AC_COLLAPSE_ALL = 0x266
CC_AC_PRINT_PREVIEW = 0x267
CC_AC_PASTE_SPECIAL = 0x268
CC_AC_INSERT_MODE = 0x269
CC_AC_DELETE = 0x26A
CC_AC_LOCK = 0x26B
CC_AC_UNLOCK = 0x26C
CC_AC_PROTECT = 0x26D
CC_AC_UNPROTECT = 0x26E
CC_AC_ATTACH_COMMENT = 0x26F
CC_AC_DELETE_COMMENT = 0x270
CC_AC_VIEW_COMMENT = 0x271
CC_AC_SELECT_WORD = 0x272
CC_AC_SELECT_SENTENCE = 0x273
CC_AC_SELECT_PARAGRAPH = 0x274
CC_AC_SELECT_COLUMN = 0x275
CC_AC_SELECT_ROW = 0x276
CC_AC_SELECT_TABLE = 0x277
CC_AC_SELECT_OBJECT = 0x278
CC_AC_REDO_REPEAT = 0x279
CC_AC_SORT = 0x27A
CC_AC_SORT_ASCENDING = 0x27B
CC_AC_SORT_DESCENDING = 0x27C
CC_AC_FILTER = 0x27D
CC_AC_SET_CLOCK = 0x27E
CC_AC_VIEW_CLOCK = 0x27F
CC_AC_SELECT_TIME_ZONE = 0x280
CC_AC_EDIT_TIME_ZONES = 0x281
CC_AC_SET_ALARM = 0x282
CC_AC_CLEAR_ALARM = 0x283
CC_AC_SNOOZE_ALARM = 0x284
CC_AC_RESET_ALARM = 0x285
CC_AC_SYNCHRONIZE = 0x286
CC_AC_SEND_RECEIVE = 0x287
CC_AC_SEND_TO = 0x288
CC_AC_REPLY = 0x289
CC_AC_REPLY_ALL = 0x28A
CC_AC_FORWARD_MSG = 0x28B
CC_AC_SEND = 0x28C
CC_AC_ATTACH_FILE | |
"""This module contains the simulation setup and execution.
.. autosummary::
ControlledEnvironment
Controller
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from simpy.core import Environment
from cs143sim.actors import Flow
from cs143sim.actors import Host
from cs143sim.actors import Link
from cs143sim.actors import Router
from cs143sim.constants import OUTPUT_BUFFER_OCCUPANCY_SCALE_FACTOR
from cs143sim.constants import OUTPUT_FLOW_RATE_SCALE_FACTOR
from cs143sim.constants import OUTPUT_LINK_RATE_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_RATE_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_BUFFER_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_DATA_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_TIME_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_DELAY_SCALE_FACTOR
from cs143sim.constants import INPUT_FILE_UPDATE_SCALE_FACTOR
from cs143sim.constants import GENERATE_ROUTER_PACKET_DEFAULT_INTERVAL
from cs143sim.errors import InputFileSyntaxError
from cs143sim.errors import InputFileUnknownReference
from cs143sim.errors import MissingAttribute
from cs143sim.events import FlowStart, RoutingTableOutdated
class ControlledEnvironment(Environment):
    """SimPy :class:`~simpy.core.Environment` that keeps a back-reference
    to its owning :class:`.Controller`

    :param controller: the :class:`.Controller` that created this
        environment
    """
    def __init__(self, controller):
        super(ControlledEnvironment, self).__init__()
        # Back-reference so actors can reach simulation-wide state
        # through ``env.controller``.
        self.controller = controller
class Controller:
"""Controller that prepares, starts, and cleans up a run of the simulation
:param str case: path to simulation input file
:ivar env: SimPy simulation :class:`~simpy.core.Environment`
:ivar dict flows: all :class:`Flows <.Flow>` in the simulation
:ivar dict hosts: all :class:`Hosts <.Host>` in the simulation
:ivar dict links: all :class:`Links <.Link>` in the simulation
:ivar dict routers: all :class:`Routers <.Router>` in the simulation
:ivar dict buffer_occupancy: buffer occupancy records for each link;
:class:`Links <.Link>` key to lists of (time, value) tuples
:ivar dict flow_rate: flow rate records for each flow;
:class:`Flows <.Flow>` key to lists of (time, value) tuples
:ivar dict link_rate: link rate records for each link;
:class:`Links <.Link>` key to lists of (time, value) tuples
:ivar dict packet_delay: packet delay records for each flow;
:class:`Flows <.Flow>` key to lists of (time, value) tuples
:ivar dict packet_loss: packet loss records for each link;
:class:`Links <.Link>` key to lists of (time, value) tuples
:ivar dict window_size: window size records for each flow;
:class:`Flows <.Flow>` key to lists of (time, value) tuples
"""
def __init__(self, case='cs143sim/cases/case0.txt'):
self.env = ControlledEnvironment(controller=self)
self.flows = {}
self.hosts = {}
self.links = {}
self.routers = {}
self.buffer_occupancy = {}
self.flow_rate = {}
self.link_rate = {}
self.packet_delay = {}
self.packet_loss = {}
self.window_size = {}
self.algorithm = 0 # default algorithm is specified by
self.read_case(case)
def make_flow(self, name, source, destination, amount, start_time, algorithm):
"""Make a new :class:`.Flow` and add it to `self.flows`
:param str name: new :class:`.Flow` name
:param source: source :class:`.Host`
:param destination: destination :class:`.Host`
:param int amount: amount of data to transfer, in bits
:param float start_time: time the new :class:`.Flow` starts
"""
new_flow = Flow(env=self.env, name=name, source=source, destination=destination,
amount=amount, algorithm=algorithm)
source.flows.append(new_flow)
destination.flows.append(new_flow)
self.flows[name] = new_flow
self.algorithm = algorithm
FlowStart(env=self.env, delay=start_time, flow=new_flow)
def make_host(self, name, ip_address):
"""Make a new :class:`.Host` and add it to `self.hosts`
:param str name: new :class:`.Host` name
:param str ip_address: new :class:`.Host`'s IP address
"""
new_host = Host(env=self.env, name=name, address=ip_address)
self.hosts[name] = new_host
def make_link(self, name, source, destination, rate, delay, buffer_capacity):
"""Make a new :class:`.Host` and add it to `self.hosts`
:param str name: new :class:`.Link` name
:param source: source :class:`.Host` or :class:`.Router`
:param destination: destination :class:`.Host` or :class:`.Router`
:param float rate: rate of data transfer, in Mbps
:param float delay: delay for data transfer, in ms
:param int buffer_capacity: size of receiver :class:`.Buffer`, in KB
"""
new_link = Link(env=self.env, name=name, source=source, destination=destination,
delay=delay, rate=rate, buffer_capacity=buffer_capacity)
# NOTE: Each link is split into two links (one for each direction) in the read_case function
# and appended with 'a' or 'b' on its ID. (e.g. 'L1' becomes 'L1a' and 'L1b')
actor = source
if isinstance(actor, Host):
actor.link = new_link
elif isinstance(actor, Router):
actor.links.append(new_link)
else:
raise Exception('Unknown Source/Destination: ' + actor)
self.links[name] = new_link
def make_router(self, name, ip_address, update_time):
"""Make a new :class:`.Router` and add it to `self.routers`
:param str name: new :class:`.Router` name
:param str ip_address: new :class:`.Router`'s IP Address
"""
new_router = Router(env=self.env, name=name, address=ip_address, update_time=int(update_time))
self.routers[name] = new_router
RoutingTableOutdated(env=self.env, delay=0, router=new_router)
def read_case(self, case):
"""Read input file at path `case` and create actors accordingly
:param str case: path to simulation input file
"""
with open(case, 'rb') as case_file:
# Open the file for line-by-line consumption
obj_type = '' # obj_type holds the current object type (LINK/HOST/Etc)
# to which attributes apply
obj_id = '' # obj_id is the current ID of the object
# These are "simple" attributes that have only 1 argument.
# Not included in this list is the CONNECTS attribute, which has 2 arguments,
# and ID, which requires special processing.
attributes = ('RATE', 'DELAY', 'DATA', 'BUFFER', 'DST', 'SRC', 'START', 'IP', 'ALGORITHM', 'UPDATE')
# Input File Attributes:
# RATE - belongs to a :class:`.Link`, specifies link rate in Mbps (float)
# DELAY - belongs to a :class:`.Link`, specifies link delay in ms (int)
# DATA - belongs to a :class:`.Flow`, specifies amount of data to be transmitted in MegaBytes (int)
# BUFFER - belongs to a :class:`.Link`, specifies buffer size in KiloBytes (int)
# DST - belongs to a :class:`.Link` or :class:`.Flow`, specifies a destination (ID of destination)
# SRC - belongs to a :class:`.Link` or :class:`.Flow`, specifies a source (ID of source)
# START - belongs to a :class:`.Flow`, specifies starting time for that flow in seconds (float)
# IP - belongs to a :class:`.Router` or :class:`.Host`, specifies the IP address of the HOST or ROUTER (str)
# ALGORITHM - belongs to a :class:`.Flow`, specifies the congestion control algorithm for that flow (int)
# UPDATE - belongs to a :class:`.Router`, specifies the time between router table updates in ms (int)
# CONNECTS - belongs to a :class:`.Link`, specifies two Hosts/Routers that are connected by that link (ID ID)
# Note: most of the units above will be converted internally and apply only to the input file.
store_in = {attribute: '' for attribute in attributes} # initialize all attributes to ''
line_number = 0
for case_line in case_file:
line_number += 1
line_comp = case_line.split()
if line_comp == [] and obj_id == '':
obj_id = '' # clear obj_ID and type on empty line
obj_type = ''
continue
try:
# if the line is empty, just set keyword to ''
keyword = line_comp[0].upper()
except AttributeError:
keyword = ''
except IndexError:
keyword = ''
if keyword == '//':
continue # ignore the comment line in the file
elif keyword in ['HOST', 'ROUTER', 'LINK', 'FLOW']:
# if we have a valid obj type, listen for new object attributes
obj_type = keyword
obj_id = ''
elif keyword in attributes:
# store simple attributes in their place in the store_in dictionary
store_in[keyword] = line_comp[1]
elif keyword == 'ID' or (keyword == '' and obj_id != ''):
# if we get a new ID attr (and already were working with another ID attr)
# OR if we read an empty line and there was an ID we were working with
# THEN
# create the object in the simulation, and start a new ID
if obj_id == '':
obj_id = line_comp[1].upper()
elif obj_type == 'LINK':
# if we're getting an additional ID attribute on a LINK
# make sure we have all the attributes available,
# then create the link object
for attribute in ['BUFFER', 'DELAY', 'RATE', 'SRC', 'DST']:
if store_in[attribute] in ['', []]:
# Make sure all the attributes are not empty
raise MissingAttribute(obj_type=obj_type, obj_id=obj_id,
missing_attr=attribute)
# If all the attributes are present, create the object
the_src = '' # temp variables that will point to src/dst instances
the_dst = ''
# Enforce referential integrity (aka check that the specified
# hosts/routers actually exist in the simulation)
for target in [store_in['SRC'], store_in['DST']]:
if target in self.hosts:
if the_src == '':
the_src = self.hosts[target]
else:
the_dst = self.hosts[target]
elif target in self.routers:
if the_src == '':
the_src = self.routers[target]
else:
the_dst = self.routers[target]
else:
raise InputFileUnknownReference(line_number, target +
' is not a valid Host/Router.')
self.make_link(name=obj_id + 'a', source=the_src, destination=the_dst,
rate=float(store_in['RATE']) * INPUT_FILE_RATE_SCALE_FACTOR,
delay=float(store_in['DELAY']) * INPUT_FILE_DELAY_SCALE_FACTOR,
buffer_capacity=int(store_in['BUFFER']) * INPUT_FILE_BUFFER_SCALE_FACTOR)
# Links are split into two, one for each direction (so that they are full-duplex).
self.make_link(name=obj_id + 'b', source=the_dst, destination=the_src,
rate=float(store_in['RATE']) * INPUT_FILE_RATE_SCALE_FACTOR,
delay=float(store_in['DELAY']) * INPUT_FILE_DELAY_SCALE_FACTOR,
buffer_capacity=int(store_in['BUFFER']) * INPUT_FILE_BUFFER_SCALE_FACTOR)
# convert into bits
elif obj_type == 'HOST':
| |
function["sum"]["type"] == 'mid':
return_string += '\\tkzDrawRiemannSumMid[%s]' % options
return_string += '\\end{scope}\n'
if return_string != '':
return_string = '%FUNCTIONS\n' + return_string
return return_string
def tikzify_segments(eucl):
    """Render every visible segment in `eucl` as ``\\tkzDrawSegment`` commands.

    An option is only emitted when it differs from its module-level default,
    keeping the generated TikZ minimal.  Returns '' when nothing is drawn,
    otherwise the commands prefixed with a ``%SEGMENTS`` marker comment.
    """
    return_string = ''
    for segment in eucl["segments"]:
        # The 'sg_default' entry only stores defaults; never draw it.
        if segment["show"] and segment["id"] != 'sg_default':
            options = ''
            # Line colour, optionally diluted with a !strength suffix.
            if segment["line_colour_name"] != DEFAULT_SEGMENT_LINE_COLOUR_NAME or\
               segment["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
                # if options != "":
                #     options += ", "
                options += "draw=%s" % segment["line_colour_name"]
                if segment["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
                    options += "!%s" % segment["line_strength"]
            # Dash pattern: either a named TikZ style or a custom pattern.
            if segment["line_stroke"] != DEFAULT_SEGMENT_LINE_STROKE:
                if options != "":
                    options += ", "
                if segment["line_stroke"] == "custom":
                    options += "dash pattern=%s" % line_stroke_custom_to_tkz(segment["line_stroke_custom"])
                else:
                    options += str(segment["line_stroke"])
            if segment["line_width"] != DEFAULT_SEGMENT_LINE_WIDTH:
                if options != "":
                    options += ", "
                options += "line width=%s pt" % segment["line_width"]
            if segment["line_opacity"] != DEFAULT_SEGMENT_LINE_OPACITY:
                if options != "":
                    options += ", "
                options += "draw opacity=%s, fill opacity=%s" % (segment["line_opacity"], segment["line_opacity"])
            # Arrow tips at the origin (o_) and/or destination (d_) end.
            if segment["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP or\
               segment["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                if options != "":
                    options += ", "
                if segment["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP:
                    o_arrow_name, o_arrow_options = arrow_tip_to_tkz_option(segment["o_arrow"]["tip"])
                    if segment["o_arrow"]["length"] != DEFAULT_SEGMENT_O_ARROW_LENGTH:
                        if o_arrow_options != "":
                            o_arrow_options += ", "
                        o_arrow_options += "scale length=%f" % segment["o_arrow"]["length"]
                    if segment["o_arrow"]["width"] != DEFAULT_SEGMENT_O_ARROW_WIDTH:
                        if o_arrow_options != "":
                            o_arrow_options += ", "
                        o_arrow_options += "scale width=%f" % segment["o_arrow"]["width"]
                    if segment["o_arrow"]["side"] != DEFAULT_SEGMENT_O_ARROW_SIDE:
                        if o_arrow_options != "":
                            o_arrow_options += ", "
                        o_arrow_options += segment["o_arrow"]["side"]
                    if segment["o_arrow"]["reversed"]:
                        if o_arrow_options != "":
                            o_arrow_options += ", "
                        o_arrow_options += "reversed"
                if segment["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                    d_arrow_name, d_arrow_options = arrow_tip_to_tkz_option(segment["d_arrow"]["tip"])
                    if segment["d_arrow"]["length"] != DEFAULT_SEGMENT_D_ARROW_LENGTH:
                        if d_arrow_options != "":
                            d_arrow_options += ", "
                        d_arrow_options += "scale length=%f" % segment["d_arrow"]["length"]
                    if segment["d_arrow"]["width"] != DEFAULT_SEGMENT_D_ARROW_WIDTH:
                        if d_arrow_options != "":
                            d_arrow_options += ", "
                        d_arrow_options += "scale width=%f" % segment["d_arrow"]["width"]
                    if segment["d_arrow"]["side"] != DEFAULT_SEGMENT_D_ARROW_SIDE:
                        if d_arrow_options != "":
                            d_arrow_options += ", "
                        d_arrow_options += segment["d_arrow"]["side"]
                    if segment["d_arrow"]["reversed"]:
                        if d_arrow_options != "":
                            d_arrow_options += ", "
                        d_arrow_options += "reversed"
                # Combine both ends into tkz 'arrows={o-d}' syntax, leaving
                # the default (unset) end empty.
                if segment["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP and\
                   segment["d_arrow"]["tip"] == DEFAULT_SEGMENT_D_ARROW_TIP:
                    options += "arrows={%s[%s]-}" % (o_arrow_name, o_arrow_options)
                elif segment["o_arrow"]["tip"] == DEFAULT_SEGMENT_O_ARROW_TIP and\
                     segment["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                    options += "arrows={-%s[%s]}" % (d_arrow_name, d_arrow_options)
                else:
                    options += "arrows={%s[%s]-%s[%s]}" % (o_arrow_name, o_arrow_options, d_arrow_name, d_arrow_options)
            # Extend the drawn segment beyond its endpoints if requested.
            if segment["extension"]["origin"] != DEFAULT_SEGMENT_O_EXTENSION or\
               segment["extension"]["destination"] != DEFAULT_SEGMENT_D_EXTENSION:
                if options != "":
                    options += ", "
                options += "add=%f and %f" % (segment["extension"]["origin"], segment["extension"]["destination"])
            # A non-default line width is isolated in its own scope.
            if segment["line_width"] != DEFAULT_SEGMENT_LINE_WIDTH:
                return_string += "\\begin{scope}\n"
            if options == '':
                return_string += "\\tkzDrawSegment(%s,%s)\n" % (segment["points"]["from"], segment["points"]["to"])
            else:
                return_string += "\\tkzDrawSegment[%s](%s,%s)\n" % (options, segment["points"]["from"], segment["points"]["to"])
            if segment["line_width"] != DEFAULT_SEGMENT_LINE_WIDTH:
                return_string += "\\end{scope}\n"
    if return_string != '':
        return_string = '%SEGMENTS\n' + return_string
    return return_string
def tikzify_segment_marks(eucl):
    """Render segment marks (tick symbols) as ``\\tkzMarkSegment`` commands.

    Marks with the default colour inherit the segment's line colour; an
    explicit mark colour overrides it.  Returns '' when nothing is marked,
    otherwise the commands prefixed with a ``%SEGMENT MARKS`` comment.
    """
    return_string = ''
    for segment in eucl["segments"]:
        # The 'sg_default' entry only stores defaults; never mark it.
        if segment["id"] == 'sg_default':
            continue
        if segment["mark"]["symbol"] != DEFAULT_SEGMENT_MARK_SYMBOL:
            # Mark width and inherited opacity cannot be passed to
            # \tkzMarkSegment directly, so wrap the command in a scope.
            if segment["mark"]["width"] != DEFAULT_SEGMENT_MARK_WIDTH or\
               (segment["mark"]["colour"] == DEFAULT_SEGMENT_MARK_COLOUR and\
                segment["line_opacity"] != DEFAULT_SEGMENT_LINE_OPACITY):
                options = ''
                if segment["mark"]["width"] != DEFAULT_SEGMENT_MARK_WIDTH:
                    options += "line width=%f" % segment["mark"]["width"]
                if segment["mark"]["colour"] == DEFAULT_SEGMENT_MARK_COLOUR and\
                   segment["line_opacity"] != DEFAULT_SEGMENT_LINE_OPACITY:
                    if options != "":
                        options += ", "
                    options += "fill opacity=%f, draw opacity=%f" % (segment["line_opacity"] ,segment["line_opacity"])
                return_string += "\\begin{scope}[%s]\n" % options
            options = ''
            options += "mark=%s" % segment["mark"]["symbol"]
            if segment["mark"]["size"] != DEFAULT_SEGMENT_MARK_SIZE:
                if options != "":
                    options += ", "
                options += "size=%s" % segment["mark"]["size"]
            # Default mark colour -> inherit the segment's line colour
            # (with optional !strength); otherwise use the mark's own colour.
            if segment["mark"]["colour"] == DEFAULT_SEGMENT_MARK_COLOUR:
                if segment["line_colour_name"] != DEFAULT_SEGMENT_LINE_COLOUR_NAME or\
                   segment["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
                    if options != "":
                        options += ", "
                    options += "color=%s" % segment["line_colour_name"]
                    if segment["line_strength"] != DEFAULT_SEGMENT_LINE_STRENGTH:
                        options += "!%s" % segment["line_strength"]
            else:
                if segment["mark"]["colour"] != DEFAULT_SEGMENT_LINE_COLOUR_NAME:
                    if options != "":
                        options += ", "
                    options += "color=%s" % segment["mark"]["colour"]
            if segment["mark"]["position"] != DEFAULT_SEGMENT_MARK_POSITION:
                if options != "":
                    options += ", "
                options += "pos=%s" % segment["mark"]["position"]
            # NOTE(review): unlike tikzify_segments, no trailing newline is
            # appended here or after \end{scope}, so consecutive mark
            # commands share one output line -- confirm this is intended.
            return_string += "\\tkzMarkSegment[%s](%s,%s)" % (options, segment["points"]["from"], segment["points"]["to"])
            if segment["mark"]["width"] != DEFAULT_SEGMENT_MARK_WIDTH or\
               (segment["mark"]["colour"] == DEFAULT_SEGMENT_MARK_COLOUR and\
                segment["line_opacity"] != DEFAULT_SEGMENT_LINE_OPACITY):
                return_string += "\\end{scope}"
    if return_string != '':
        return_string = '%SEGMENT MARKS\n' + return_string
    return return_string
def tikzify_filled_angles(eucl):
    """Emit a ``\\tkzFillAngle`` command for every visible, non-right angle.

    Returns '' when nothing is filled, otherwise the commands prefixed
    with a ``%FILLED ANGLES`` marker comment.
    """
    body = ''
    for angle in eucl["angles"]:
        # Skip the defaults-holder entry, hidden angles, and right angles
        # (right angles are drawn by tikzify_angles instead).
        if angle["id"] == 'ang_default' or not angle["show"] or angle["right_angle"]:
            continue
        custom_opacity = angle["fill_opacity"] != DEFAULT_ANGLE_FILL_OPACITY
        if custom_opacity:
            # Opacity is applied through an enclosing scope.
            body += "\\begin{scope}[fill opacity=%s]" % angle["fill_opacity"]
        opts = 'size=%s, ' % angle["size"]
        if angle["fill_colour_name"] != DEFAULT_ANGLE_FILL_COLOUR_NAME:
            opts += "fill=%s" % angle["fill_colour_name"]
            if angle["fill_strength"] != DEFAULT_ANGLE_FILL_STRENGTH:
                opts += "!%s" % angle["fill_strength"]
        pts = angle["points"]
        body += "\\tkzFillAngle[%s](%s,%s,%s)\n" % (opts, pts["A"], pts["B"], pts["C"])
        if custom_opacity:
            body += "\\end{scope}\n"
    if body != '':
        body = '%FILLED ANGLES\n' + body
    return body
def tikzify_angles(eucl):
    """Render visible angles as ``\\tkzMarkRightAngle`` / ``\\tkzMarkAngle``
    commands, emitting an option only when it differs from its default.

    Non-default line width/opacity is applied through an enclosing scope.
    Returns '' when nothing is drawn, otherwise the commands prefixed with
    an ``%ANGLES`` marker comment.
    """
    return_string = ''
    for angle in eucl["angles"]:
        if angle["id"] != 'ang_default' and angle["show"]:
            # Line width and draw opacity must wrap the command in a scope.
            if angle["line_width"] != DEFAULT_ANGLE_LINE_WIDTH or\
               angle["line_opacity"] != DEFAULT_ANGLE_FILL_OPACITY:
                options = 'line width=%s' % angle["line_width"]
                if angle["line_opacity"] != DEFAULT_ANGLE_FILL_OPACITY:
                    options += ", draw opacity=%s" % angle["line_opacity"]
                return_string += "\\begin{scope}[%s]\n" % options
            options = ''
            if angle["right_angle"]:
                # Right angles use the dedicated tkz command; a non-default
                # type selects the alternative ('german') style.
                if angle["type"] != DEFAULT_RIGHT_ANGLE_TYPE:
                    options += 'german'
                if options != '':
                    options += ', '
                options += "size=%s" % (angle["size"])
                return_string += "\\tkzMarkRightAngle[%s](%s,%s,%s)\n" % (options, angle["points"]["A"], angle["points"]["B"], angle["points"]["C"])
            else:
                options += "size=%s cm" % (angle["size"])
                if angle["arc"] != DEFAULT_ANGLE_ARC:
                    if options != '':
                        options += ', '
                    # tkz encodes the number of arcs as repeated 'l' chars.
                    options += "arc=%s" % (angle["arc"] * 'l')
                if options != '':
                    options += ', '
                options += "mark=%s" % angle["mksymbol"]
                if angle["mksize"] != DEFAULT_ANGLE_MARK_SIZE:
                    if options != '':
                        options += ', '
                    options += "mksize=%s pt" % angle["mksize"]
                if angle["mkcolour"] != DEFAULT_ANGLE_MARK_COLOUR:
                    if options != '':
                        options += ', '
                    options += "mkcolor=%s" % angle["mkcolour"]
                if angle["mkpos"] != DEFAULT_ANGLE_MARK_POSITION:
                    if options != '':
                        options += ', '
                    options += "mkpos=%s" % angle["mkpos"]
                if angle["line_colour_name"] != DEFAULT_ANGLE_LINE_COLOUR_NAME or\
                   angle["line_strength"] != DEFAULT_ANGLE_LINE_STRENGTH:
                    if options != "":
                        options += ", "
                    options += "color=%s" % angle["line_colour_name"]
                    # BUG FIX: previously compared against
                    # DEFAULT_POINT_LINE_STRENGTH (copy-paste from point
                    # code); the angle strength must be checked against the
                    # angle default, matching the guard condition above and
                    # the parallel logic in tikzify_segments.
                    if angle["line_strength"] != DEFAULT_ANGLE_LINE_STRENGTH:
                        options += "!%s" % angle["line_strength"]
                if angle["line_stroke"] != DEFAULT_ANGLE_LINE_STROKE:
                    if options != "":
                        options += ", "
                    if angle["line_stroke"] == "custom":
                        options += "dash pattern=%s" % line_stroke_custom_to_tkz(angle["line_stroke_custom"])
                    else:
                        options += str(angle["line_stroke"])
                # Arrow tips at the arc's origin (o_) / destination (d_) end.
                if angle["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP or\
                   angle["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                    if options != "":
                        options += ", "
                    if angle["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP:
                        o_arrow_name, o_arrow_options = arrow_tip_to_tkz_option(angle["o_arrow"]["tip"])
                        if angle["o_arrow"]["length"] != DEFAULT_SEGMENT_O_ARROW_LENGTH:
                            if o_arrow_options != "":
                                o_arrow_options += ", "
                            o_arrow_options += "scale length=%f" % angle["o_arrow"]["length"]
                        if angle["o_arrow"]["width"] != DEFAULT_SEGMENT_O_ARROW_WIDTH:
                            if o_arrow_options != "":
                                o_arrow_options += ", "
                            o_arrow_options += "scale width=%f" % angle["o_arrow"]["width"]
                        if angle["o_arrow"]["side"] != DEFAULT_SEGMENT_O_ARROW_SIDE:
                            if o_arrow_options != "":
                                o_arrow_options += ", "
                            o_arrow_options += angle["o_arrow"]["side"]
                        if angle["o_arrow"]["reversed"]:
                            if o_arrow_options != "":
                                o_arrow_options += ", "
                            o_arrow_options += "reversed"
                    if angle["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                        d_arrow_name, d_arrow_options = arrow_tip_to_tkz_option(angle["d_arrow"]["tip"])
                        if angle["d_arrow"]["length"] != DEFAULT_SEGMENT_D_ARROW_LENGTH:
                            if d_arrow_options != "":
                                d_arrow_options += ", "
                            d_arrow_options += "scale length=%f" % angle["d_arrow"]["length"]
                        if angle["d_arrow"]["width"] != DEFAULT_SEGMENT_D_ARROW_WIDTH:
                            if d_arrow_options != "":
                                d_arrow_options += ", "
                            d_arrow_options += "scale width=%f" % angle["d_arrow"]["width"]
                        if angle["d_arrow"]["side"] != DEFAULT_SEGMENT_D_ARROW_SIDE:
                            if d_arrow_options != "":
                                d_arrow_options += ", "
                            d_arrow_options += angle["d_arrow"]["side"]
                        if angle["d_arrow"]["reversed"]:
                            if d_arrow_options != "":
                                d_arrow_options += ", "
                            d_arrow_options += "reversed"
                    # Combine both ends into tkz 'arrows={o-d}' syntax.
                    if angle["o_arrow"]["tip"] != DEFAULT_SEGMENT_O_ARROW_TIP and\
                       angle["d_arrow"]["tip"] == DEFAULT_SEGMENT_D_ARROW_TIP:
                        options += "arrows={%s[%s]-}" % (o_arrow_name, o_arrow_options)
                    elif angle["o_arrow"]["tip"] == DEFAULT_SEGMENT_O_ARROW_TIP and\
                         angle["d_arrow"]["tip"] != DEFAULT_SEGMENT_D_ARROW_TIP:
                        options += "arrows={-%s[%s]}" % (d_arrow_name, d_arrow_options)
                    else:
                        options += "arrows={%s[%s]-%s[%s]}" % (o_arrow_name, o_arrow_options, d_arrow_name, d_arrow_options)
                return_string += "\\tkzMarkAngle[%s](%s,%s,%s)\n" % (options, angle["points"]["A"], angle["points"]["B"], angle["points"]["C"])
            if angle["line_width"] != DEFAULT_ANGLE_LINE_WIDTH or\
               angle["line_opacity"] != DEFAULT_ANGLE_FILL_OPACITY:
                return_string += "\\end{scope}\n"
    if return_string != '':
        return_string = '%ANGLES\n' + return_string
    return return_string
def tikzify_angle_labels(eucl):
    """Emit a ``\\tkzLabelAngle`` command for each angle whose label is shown.

    Returns '' when no labels are shown, otherwise the commands prefixed
    with an ``%ANGLE LABELS`` marker comment.
    """
    body = ''
    for angle in eucl["angles"]:
        if angle["id"] == 'ang_default' or not angle["label"]["show"]:
            continue
        # Collect only the non-default options, then comma-join them.
        parts = []
        if angle["label"]["distance"] != DEFAULT_ANGLE_LABEL_DISTANCE:
            parts.append("pos=%s" % angle["label"]["distance"])
        if angle["label"]["anchor"] != DEFAULT_ANGLE_LABEL_ANCHOR:
            parts.append("%s" % angle["label"]["anchor"])
        pts = angle["points"]
        body += "\\tkzLabelAngle[%s](%s,%s,%s){%s}" % (
            ', '.join(parts), pts["A"], pts["B"], pts["C"], angle["label"]["text"])
    if body != '':
        body = '%ANGLE LABELS\n' + body
    return body
def tikzify_segment_labels(eucl):
return_string = ''
for segment in eucl["segments"]:
if segment["id"] == 'sg_default':
continue
if segment["label"]["show"]:
options = ''
if segment["label"]["anchor"] != DEFAULT_SEGMENT_LABEL_ANCHOR:
options += "%s" % segment["label"]["anchor"]
if segment["label"]["position"] != DEFAULT_SEGMENT_LABEL_POSITION:
if options != "":
options += ", "
options += "pos=%s" % segment["label"]["position"]
if segment["label"]["angle"] != DEFAULT_SEGMENT_LABEL_ANGLE or\
segment["label"]["distance"] != DEFAULT_SEGMENT_LABEL_DISTANCE:
if options != "":
options += ", "
options += "shift={(%s:%s)}" % (segment["label"]["angle"], segment["label"]["distance"])
if options == ' ':
return_string += "\\tkzLabelSegment(%s,%s){%s}\n" % (segment["points"]["from"], segment["points"]["to"], segment["label"]["text"])
else:
return_string += "\\tkzLabelSegment[%s](%s,%s){%s}\n" % (options, segment["points"]["from"], segment["points"]["to"], | |
skin tone': u'\U0000261d\U0001f3fe',
u'index pointing up dark skin tone': u'\U0000261d\U0001f3ff',
u'backhand index pointing up': u'\U0001f446',
u'backhand index pointing up light skin tone': u'\U0001f446\U0001f3fb',
u'backhand index pointing up mediumlight skin tone': u'\U0001f446\U0001f3fc',
u'backhand index pointing up medium skin tone': u'\U0001f446\U0001f3fd',
u'backhand index pointing up mediumdark skin tone': u'\U0001f446\U0001f3fe',
u'backhand index pointing up dark skin tone': u'\U0001f446\U0001f3ff',
u'middle finger': u'\U0001f595',
u'middle finger light skin tone': u'\U0001f595\U0001f3fb',
u'middle finger mediumlight skin tone': u'\U0001f595\U0001f3fc',
u'middle finger medium skin tone': u'\U0001f595\U0001f3fd',
u'middle finger mediumdark skin tone': u'\U0001f595\U0001f3fe',
u'middle finger dark skin tone': u'\U0001f595\U0001f3ff',
u'backhand index pointing down': u'\U0001f447',
u'backhand index pointing down light skin tone': u'\U0001f447\U0001f3fb',
u'backhand index pointing down mediumlight skin tone': u'\U0001f447\U0001f3fc',
u'backhand index pointing down medium skin tone': u'\U0001f447\U0001f3fd',
u'backhand index pointing down mediumdark skin tone': u'\U0001f447\U0001f3fe',
u'backhand index pointing down dark skin tone': u'\U0001f447\U0001f3ff',
u'victory hand': u'\U0000270c\U0000fe0f',
u'victory hand light skin tone': u'\U0000270c\U0001f3fb',
u'victory hand mediumlight skin tone': u'\U0000270c\U0001f3fc',
u'victory hand medium skin tone': u'\U0000270c\U0001f3fd',
u'victory hand mediumdark skin tone': u'\U0000270c\U0001f3fe',
u'victory hand dark skin tone': u'\U0000270c\U0001f3ff',
u'crossed fingers': u'\U0001f91e',
u'crossed fingers light skin tone': u'\U0001f91e\U0001f3fb',
u'crossed fingers mediumlight skin tone': u'\U0001f91e\U0001f3fc',
u'crossed fingers medium skin tone': u'\U0001f91e\U0001f3fd',
u'crossed fingers mediumdark skin tone': u'\U0001f91e\U0001f3fe',
u'crossed fingers dark skin tone': u'\U0001f91e\U0001f3ff',
u'vulcan salute': u'\U0001f596',
u'vulcan salute light skin tone': u'\U0001f596\U0001f3fb',
u'vulcan salute mediumlight skin tone': u'\U0001f596\U0001f3fc',
u'vulcan salute medium skin tone': u'\U0001f596\U0001f3fd',
u'vulcan salute mediumdark skin tone': u'\U0001f596\U0001f3fe',
u'vulcan salute dark skin tone': u'\U0001f596\U0001f3ff',
u'sign of the horns': u'\U0001f918',
u'sign of the horns light skin tone': u'\U0001f918\U0001f3fb',
u'sign of the horns mediumlight skin tone': u'\U0001f918\U0001f3fc',
u'sign of the horns medium skin tone': u'\U0001f918\U0001f3fd',
u'sign of the horns mediumdark skin tone': u'\U0001f918\U0001f3fe',
u'sign of the horns dark skin tone': u'\U0001f918\U0001f3ff',
u'call me hand': u'\U0001f919',
u'call me hand light skin tone': u'\U0001f919\U0001f3fb',
u'call me hand mediumlight skin tone': u'\U0001f919\U0001f3fc',
u'call me hand medium skin tone': u'\U0001f919\U0001f3fd',
u'call me hand mediumdark skin tone': u'\U0001f919\U0001f3fe',
u'call me hand dark skin tone': u'\U0001f919\U0001f3ff',
u'hand with fingers splayed': u'\U0001f590\U0000fe0f',
u'hand with fingers splayed light skin tone': u'\U0001f590\U0001f3fb',
u'hand with fingers splayed mediumlight skin tone': u'\U0001f590\U0001f3fc',
u'hand with fingers splayed medium skin tone': u'\U0001f590\U0001f3fd',
u'hand with fingers splayed mediumdark skin tone': u'\U0001f590\U0001f3fe',
u'hand with fingers splayed dark skin tone': u'\U0001f590\U0001f3ff',
u'raised hand': u'\U0000270b',
u'raised hand light skin tone': u'\U0000270b\U0001f3fb',
u'raised hand mediumlight skin tone': u'\U0000270b\U0001f3fc',
u'raised hand medium skin tone': u'\U0000270b\U0001f3fd',
u'raised hand mediumdark skin tone': u'\U0000270b\U0001f3fe',
u'raised hand dark skin tone': u'\U0000270b\U0001f3ff',
u'ok hand': u'\U0001f44c',
u'ok hand light skin tone': u'\U0001f44c\U0001f3fb',
u'ok hand mediumlight skin tone': u'\U0001f44c\U0001f3fc',
u'ok hand medium skin tone': u'\U0001f44c\U0001f3fd',
u'ok hand mediumdark skin tone': u'\U0001f44c\U0001f3fe',
u'ok hand dark skin tone': u'\U0001f44c\U0001f3ff',
u'thumbs up': u'\U0001f44d',
u'thumbs up light skin tone': u'\U0001f44d\U0001f3fb',
u'thumbs up mediumlight skin tone': u'\U0001f44d\U0001f3fc',
u'thumbs up medium skin tone': u'\U0001f44d\U0001f3fd',
u'thumbs up mediumdark skin tone': u'\U0001f44d\U0001f3fe',
u'thumbs up dark skin tone': u'\U0001f44d\U0001f3ff',
u'thumbs down': u'\U0001f44e',
u'thumbs down light skin tone': u'\U0001f44e\U0001f3fb',
u'thumbs down mediumlight skin tone': u'\U0001f44e\U0001f3fc',
u'thumbs down medium skin tone': u'\U0001f44e\U0001f3fd',
u'thumbs down mediumdark skin tone': u'\U0001f44e\U0001f3fe',
u'thumbs down dark skin tone': u'\U0001f44e\U0001f3ff',
u'raised fist': u'\U0000270a',
u'raised fist light skin tone': u'\U0000270a\U0001f3fb',
u'raised fist mediumlight skin tone': u'\U0000270a\U0001f3fc',
u'raised fist medium skin tone': u'\U0000270a\U0001f3fd',
u'raised fist mediumdark skin tone': u'\U0000270a\U0001f3fe',
u'raised fist dark skin tone': u'\U0000270a\U0001f3ff',
u'oncoming fist': u'\U0001f44a',
u'oncoming fist light skin tone': u'\U0001f44a\U0001f3fb',
u'oncoming fist mediumlight skin tone': u'\U0001f44a\U0001f3fc',
u'oncoming fist medium skin tone': u'\U0001f44a\U0001f3fd',
u'oncoming fist mediumdark skin tone': u'\U0001f44a\U0001f3fe',
u'oncoming fist dark skin tone': u'\U0001f44a\U0001f3ff',
u'leftfacing fist': u'\U0001f91b',
u'leftfacing fist light skin tone': u'\U0001f91b\U0001f3fb',
u'leftfacing fist mediumlight skin tone': u'\U0001f91b\U0001f3fc',
u'leftfacing fist medium skin tone': u'\U0001f91b\U0001f3fd',
u'leftfacing fist mediumdark skin tone': u'\U0001f91b\U0001f3fe',
u'leftfacing fist dark skin tone': u'\U0001f91b\U0001f3ff',
u'rightfacing fist': u'\U0001f91c',
u'rightfacing fist light skin tone': u'\U0001f91c\U0001f3fb',
u'rightfacing fist mediumlight skin tone': u'\U0001f91c\U0001f3fc',
u'rightfacing fist medium skin tone': u'\U0001f91c\U0001f3fd',
u'rightfacing fist mediumdark skin tone': u'\U0001f91c\U0001f3fe',
u'rightfacing fist dark skin tone': u'\U0001f91c\U0001f3ff',
u'raised back of hand': u'\U0001f91a',
u'raised back of hand light skin tone': u'\U0001f91a\U0001f3fb',
u'raised back of hand mediumlight skin tone': u'\U0001f91a\U0001f3fc',
u'raised back of hand medium skin tone': u'\U0001f91a\U0001f3fd',
u'raised back of hand mediumdark skin tone': u'\U0001f91a\U0001f3fe',
u'raised back of hand dark skin tone': u'\U0001f91a\U0001f3ff',
u'waving hand': u'\U0001f44b',
u'waving hand light skin tone': u'\U0001f44b\U0001f3fb',
u'waving hand mediumlight skin tone': u'\U0001f44b\U0001f3fc',
u'waving hand medium skin tone': u'\U0001f44b\U0001f3fd',
u'waving hand mediumdark skin tone': u'\U0001f44b\U0001f3fe',
u'waving hand dark skin tone': u'\U0001f44b\U0001f3ff',
u'loveyou gesture': u'\U0001f91f',
u'loveyou gesture light skin tone': u'\U0001f91f\U0001f3fb',
u'loveyou gesture mediumlight skin tone': u'\U0001f91f\U0001f3fc',
u'loveyou gesture medium skin tone': u'\U0001f91f\U0001f3fd',
u'loveyou gesture mediumdark skin tone': u'\U0001f91f\U0001f3fe',
u'loveyou gesture dark skin tone': u'\U0001f91f\U0001f3ff',
u'writing hand': u'\U0000270d\U0000fe0f',
u'writing hand light skin tone': u'\U0000270d\U0001f3fb',
u'writing hand mediumlight skin tone': u'\U0000270d\U0001f3fc',
u'writing hand medium skin tone': u'\U0000270d\U0001f3fd',
u'writing hand mediumdark skin tone': u'\U0000270d\U0001f3fe',
u'writing hand dark skin tone': u'\U0000270d\U0001f3ff',
u'clapping hands': u'\U0001f44f',
u'clapping hands light skin tone': u'\U0001f44f\U0001f3fb',
u'clapping hands mediumlight skin tone': u'\U0001f44f\U0001f3fc',
u'clapping hands medium skin tone': u'\U0001f44f\U0001f3fd',
u'clapping hands mediumdark skin tone': u'\U0001f44f\U0001f3fe',
u'clapping hands dark skin tone': u'\U0001f44f\U0001f3ff',
u'open hands': u'\U0001f450',
u'open hands light skin tone': u'\U0001f450\U0001f3fb',
u'open hands mediumlight skin tone': u'\U0001f450\U0001f3fc',
u'open hands medium skin tone': u'\U0001f450\U0001f3fd',
u'open hands mediumdark skin tone': u'\U0001f450\U0001f3fe',
u'open hands dark skin tone': u'\U0001f450\U0001f3ff',
u'raising hands': u'\U0001f64c',
u'raising hands light skin tone': u'\U0001f64c\U0001f3fb',
u'raising hands mediumlight skin tone': u'\U0001f64c\U0001f3fc',
u'raising hands medium skin tone': u'\U0001f64c\U0001f3fd',
u'raising hands mediumdark skin tone': u'\U0001f64c\U0001f3fe',
u'raising hands dark skin tone': u'\U0001f64c\U0001f3ff',
u'palms up together': u'\U0001f932',
u'palms up together light skin tone': u'\U0001f932\U0001f3fb',
u'palms up together mediumlight skin tone': u'\U0001f932\U0001f3fc',
u'palms up together medium skin tone': u'\U0001f932\U0001f3fd',
u'palms up together mediumdark skin tone': u'\U0001f932\U0001f3fe',
u'palms up together dark skin tone': u'\U0001f932\U0001f3ff',
u'folded hands': u'\U0001f64f',
u'folded hands light skin tone': u'\U0001f64f\U0001f3fb',
u'folded hands mediumlight skin tone': u'\U0001f64f\U0001f3fc',
u'folded hands medium skin tone': u'\U0001f64f\U0001f3fd',
u'folded hands mediumdark skin tone': u'\U0001f64f\U0001f3fe',
u'folded hands dark skin tone': u'\U0001f64f\U0001f3ff',
u'handshake': u'\U0001f91d',
u'nail polish': u'\U0001f485',
u'nail polish light skin tone': u'\U0001f485\U0001f3fb',
u'nail polish mediumlight skin tone': u'\U0001f485\U0001f3fc',
u'nail polish medium skin tone': u'\U0001f485\U0001f3fd',
u'nail polish mediumdark skin tone': u'\U0001f485\U0001f3fe',
u'nail polish dark skin tone': u'\U0001f485\U0001f3ff',
u'ear': u'\U0001f442',
u'ear light skin tone': u'\U0001f442\U0001f3fb',
u'ear mediumlight skin tone': u'\U0001f442\U0001f3fc',
u'ear medium skin tone': u'\U0001f442\U0001f3fd',
u'ear mediumdark skin tone': u'\U0001f442\U0001f3fe',
u'ear dark skin tone': u'\U0001f442\U0001f3ff',
u'nose': u'\U0001f443',
u'nose light skin tone': u'\U0001f443\U0001f3fb',
u'nose mediumlight skin tone': u'\U0001f443\U0001f3fc',
u'nose medium skin tone': u'\U0001f443\U0001f3fd',
u'nose mediumdark skin tone': u'\U0001f443\U0001f3fe',
u'nose dark skin tone': u'\U0001f443\U0001f3ff',
u'redhaired': u'\U0001f9b0',
u'curlyhaired': u'\U0001f9b1',
u'bald': u'\U0001f9b2',
u'whitehaired': u'\U0001f9b3',
u'footprints': u'\U0001f463',
u'eyes': u'\U0001f440',
u'eye': u'\U0001f441\U0000fe0f',
u'eye in speech bubble': u'\U0001f441\U0000fe0f\U0000200d\U0001f5e8\U0000fe0f',
u'brain': u'\U0001f9e0',
u'bone': u'\U0001f9b4',
u'tooth': u'\U0001f9b7',
u'tongue': u'\U0001f445',
u'mouth': u'\U0001f444',
},
u'emotion': {
u'kiss mark': u'\U0001f48b',
u'heart with arrow': u'\U0001f498',
u'red heart': u'\U00002764\U0000fe0f',
u'beating heart': u'\U0001f493',
u'broken heart': u'\U0001f494',
u'two hearts': u'\U0001f495',
u'sparkling heart': u'\U0001f496',
u'growing heart': u'\U0001f497',
u'blue heart': u'\U0001f499',
u'green heart': u'\U0001f49a',
u'yellow heart': u'\U0001f49b',
u'orange heart': u'\U0001f9e1',
u'purple heart': u'\U0001f49c',
u'black heart': u'\U0001f5a4',
u'heart with ribbon': u'\U0001f49d',
u'revolving hearts': u'\U0001f49e',
u'heart decoration': u'\U0001f49f',
u'heavy heart exclamation': u'\U00002763\U0000fe0f',
u'love letter': u'\U0001f48c',
u'zzz': u'\U0001f4a4',
u'anger symbol': u'\U0001f4a2',
u'bomb': u'\U0001f4a3',
u'collision': u'\U0001f4a5',
u'sweat droplets': u'\U0001f4a6',
u'dashing away': u'\U0001f4a8',
u'dizzy': u'\U0001f4ab',
u'speech balloon': u'\U0001f4ac',
u'left speech bubble': u'\U0001f5e8\U0000fe0f',
u'right anger bubble': u'\U0001f5ef\U0000fe0f',
u'thought balloon': u'\U0001f4ad',
u'hole': u'\U0001f573\U0000fe0f',
},
u'clothing': {
u'glasses': u'\U0001f453',
u'sunglasses': u'\U0001f576\U0000fe0f',
u'goggles': u'\U0001f97d',
u'lab coat': u'\U0001f97c',
u'necktie': u'\U0001f454',
u'tshirt': u'\U0001f455',
u'jeans': u'\U0001f456',
u'scarf': u'\U0001f9e3',
u'gloves': u'\U0001f9e4',
u'coat': u'\U0001f9e5',
u'socks': u'\U0001f9e6',
u'dress': u'\U0001f457',
u'kimono': u'\U0001f458',
u'bikini': u'\U0001f459',
u'womans clothes': u'\U0001f45a',
u'purse': u'\U0001f45b',
u'handbag': u'\U0001f45c',
u'clutch bag': u'\U0001f45d',
u'shopping bags': u'\U0001f6cd\U0000fe0f',
u'school backpack': u'\U0001f392',
u'mans shoe': u'\U0001f45e',
u'running shoe': u'\U0001f45f',
u'hiking boot': u'\U0001f97e',
u'womans flat shoe': u'\U0001f97f',
u'highheeled shoe': u'\U0001f460',
u'womans sandal': u'\U0001f461',
u'womans boot': u'\U0001f462',
u'crown': u'\U0001f451',
u'womans hat': u'\U0001f452',
u'top hat': u'\U0001f3a9',
u'graduation cap': u'\U0001f393',
u'billed cap': u'\U0001f9e2',
u'rescue workers helmet': u'\U000026d1\U0000fe0f',
u'prayer beads': u'\U0001f4ff',
u'lipstick': u'\U0001f484',
u'ring': u'\U0001f48d',
u'gem stone': u'\U0001f48e',
},
u'animal-mammal': {
u'monkey face': u'\U0001f435',
u'monkey': u'\U0001f412',
u'gorilla': u'\U0001f98d',
u'dog face': u'\U0001f436',
u'dog': u'\U0001f415',
u'poodle': u'\U0001f429',
u'wolf face': u'\U0001f43a',
u'fox face': u'\U0001f98a',
u'raccoon': u'\U0001f99d',
u'cat face': u'\U0001f431',
| |
-109.0, -113.0,
-117.0, -121.0, -125.0, -129.0, -133.0, -137.0, -141.0, -145.0, -149.0, -153.0, -157.0, -161.0, -165.0, -169.0,
-173.0, -177.0, -181.0, -185.0]
SDNoise = [-200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0, -200.0,
-200.0, -200.0, -200.0, -200.0, -192.0, -184.0, -176.0, -167.99999999999997, -160.0, -152.0, -144.0, -136.0,
-128.0, -120.0, -112.0, -104.0, -96.0, -88.0, -80.0]
return f, XTALNoise, PFDCPNoise, PrescalerNoise, VCONoise, SDNoise
def writeResults(C1,C2,C3,R2,R3,f,magCL,magOL,phaseOL,magvcoTF,PFDCPNoiseOut,
                 PrescalerNoiseOut,VCONoiseOut,R2NoiseOut,R3NoiseOut,XTALNoiseOut,SDNoiseOut,
                 TotalNoise,t,fT,lockTime_0p001Pcnt, lockTime_0p0001Pcnt, lockTime_0p00001Pcnt,
                 lockTime_0p000001Pcnt, f2, fInterpol, TotalNoiseV2HzInterpol, enteredKphi,
                 enteredKVCO, enteredPM, enteredLoopBW, enteredFout, enteredFref, enteredR,
                 enteredP, enteredT31, enteredGamma, noiseWorkbook,PFDCPNoise, XTALNoise,
                 PrescalerNoise, VCONoise, SDNoise):
    '''
    Writes every result to an Excel file if user chooses to generate Excel report.
    Relies on xlwt module.

    Builds one xlwt Workbook containing these worksheets:
      - Loop Parameters: the user-entered design inputs (Kphi, KVCO, phase
        margin, loop bandwidth, Fout, Fref, R, P, T31, Gamma) plus a PLL
        block-diagram bitmap and reference hyperlinks.
      - Noise Sources: the per-frequency input noise profiles (XTAL, PFDCP,
        prescaler, VCO, sigma-delta) in dBc/Hz.
      - Loop Filter Components: C1, C2, C3, R2, R3 values.
      - Loop Response Data: closed/open-loop magnitude, open-loop phase and
        the VCO transfer-function magnitude versus frequency.
      - Output Noise Contributors: each source's contribution at the PLL
        output plus the total noise.
      - Time Response: output frequency versus time.
      - Lock Times: lock time at four relative-error tolerances.
      - Phase Error: interpolated total noise plus live Excel formulas
        (Simpson's rule) for RMS phase error, jitter and RMS frequency error,
        driven by user-editable integration limits in cells F2/F3.

    If `noiseWorkbook` is the empty string, the user noise file was missing or
    unreadable; a warning is written into the report and the built-in
    `defaultNoise()` tables replace the noise-source arrays.

    Returns the Workbook object; the caller is responsible for saving it.
    '''
    book = Workbook()
    # ---- Cell styles (xlwt easyxf), shared by every worksheet below ----
    # whiteCell blanks out the default background; each sheet paints its rows
    # white first and then overwrites the cells it actually uses.
    whiteCell = easyxf("pattern: fore_colour white, pattern solid;")
    parameter = easyxf('font: name Arial, bold True, height 280; alignment: horizontal center')
    # parameterValue*: bordered white data cells; the num_format_str suffix
    # selects scientific (0.000E+00), fixed (0.000) or percent (0.000000%).
    parameterValue = easyxf('font: name Arial, height 280;' 'borders: left thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;', num_format_str='0.000E+00')
    parameterValue2 = easyxf('font: name Arial, height 280;' 'borders: left thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;', num_format_str='0.000')
    parameterValueRed = easyxf('font: name Arial, bold True, height 280, colour red;' 'alignment: horizontal center', num_format_str='0.000E+00')
    parameterValue2Red = easyxf('font: name Arial, bold True, height 280, colour red;' 'alignment: horizontal center', num_format_str='0.000')
    parameterValue3 = easyxf('font: name Arial, height 280;' 'borders: left thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;', num_format_str='0.000000%')
    columnHeader = easyxf('font: name Arial, bold True, height 280; alignment: horizontal center')
    notes = easyxf('font: name Arial, bold True, height 280; alignment: horizontal left;' "pattern: fore_colour white, pattern solid;")
    notesRed = easyxf('font: name Arial, bold True, height 280, colour red; alignment: horizontal left;' "pattern: fore_colour white, pattern solid;")
    link = easyxf('font: name Arial, bold True, italic True, height 240, underline single, colour red; alignment: horizontal left;' "pattern: fore_colour white, pattern solid;")
    linkContact = easyxf('font: name Arial, bold True, italic True, height 240, underline single, colour black; alignment: horizontal left;' "pattern: fore_colour white, pattern solid;")
    # columnHeaderBorder*: header variants; suffix letters encode which thick
    # borders are present (T=top, L=left, B=bottom, R=right).
    columnHeaderBorderL = easyxf('font: name Arial, bold True, height 280;' 'borders: left thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;')
    columnHeaderBorderLRed = easyxf('font: name Arial, bold True, height 280, colour red;' 'borders: left thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;')
    columnHeaderBorderBLRed = easyxf('font: name Arial, bold True, height 280, colour red;' 'borders: left thick, bottom thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;')
    columnHeaderBorderTL = easyxf('font: name Arial, bold True, height 280;' 'borders: left thick, top thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;')
    columnHeaderBorderBL = easyxf('font: name Arial, bold True, height 280;' 'borders: left thick, bottom thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;')
    columnHeaderBorderTLBR = easyxf('font: name Arial, bold True, height 280;' 'borders: left thick, top thick, bottom thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;')
    columnHeaderBorderTLBRAlignleft = easyxf('font: name Arial, bold True, height 280;' 'borders: left thick, top thick, bottom thick, right thick;' 'alignment: horizontal left;' 'pattern: pattern solid, fore_colour gray25;')
    parameterValue2BorderBR = easyxf('font: name Arial, height 280;' 'borders: right thick, bottom thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;', num_format_str='0.000')
    parameterValue2BorderR = easyxf('font: name Arial, height 280;' 'borders: right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;', num_format_str='0.000')
    parameterValueBorderR = easyxf('font: name Arial, height 280;' 'borders: right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;', num_format_str='0.000E+00')
    parameterValueBorderBR = easyxf('font: name Arial, height 280;' 'borders: bottom thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;', num_format_str='0.000E+00')
    parameterValueBorderBRWhite = easyxf('font: name Arial, height 280;' 'borders: bottom thick, right thick, left thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour white;', num_format_str='0.000E+00')
    parameterValueBorderTR = easyxf('font: name Arial, height 280;' 'borders: top thick, right thick;' 'alignment: horizontal center;' 'pattern: pattern solid, fore_colour gray25;', num_format_str='0.000E+00')
    redResult = easyxf('font: name Arial, bold True, height 280, colour red;' 'alignment: horizontal center')
    #Write the Loop Parameters worksheet
    sheetLoopParam = book.add_sheet('Loop Parameters')
    for i in range(100):
        sheetLoopParam.row(i).set_style(whiteCell)#Make everything white first
    sheetLoopParam.col(0).width = 8000
    sheetLoopParam.col(1).width = 5000
    # Block-diagram bitmap; path is resolved relative to the current working
    # directory, so the .bmp must ship next to the tool.
    sheetLoopParam.insert_bitmap(os.path.abspath("PLL_diagram_Excel.bmp"), 0, 0)
    sheetLoopParam.write(20,0,'Kphi',columnHeaderBorderTL)
    sheetLoopParam.write(20,1,enteredKphi,parameterValueBorderTR)
    sheetLoopParam.write(21,0,'KVCO',columnHeaderBorderL)
    sheetLoopParam.write(21,1,enteredKVCO,parameterValueBorderR)
    sheetLoopParam.write(22,0,'Phase Margin',columnHeaderBorderL)
    sheetLoopParam.write(22,1,enteredPM,parameterValue2BorderR)
    sheetLoopParam.write(23,0,'Loop Bandwidth',columnHeaderBorderL)
    sheetLoopParam.write(23,1,enteredLoopBW,parameterValueBorderR)
    sheetLoopParam.write(24,0,'Fout',columnHeaderBorderL)
    # NOTE: this is Excel cell B25; the jitter formula on the Phase Error
    # sheet references 'Loop Parameters'!B25 (= Fout) below.
    sheetLoopParam.write(24,1,enteredFout,parameterValueBorderR)
    sheetLoopParam.write(25,0,'Fref',columnHeaderBorderL)
    sheetLoopParam.write(25,1,enteredFref,parameterValueBorderR)
    sheetLoopParam.write(26,0,'R',columnHeaderBorderL)
    sheetLoopParam.write(26,1,enteredR,parameterValue2BorderR)
    sheetLoopParam.write(27,0,'P',columnHeaderBorderL)
    sheetLoopParam.write(27,1,enteredP,parameterValue2BorderR)
    sheetLoopParam.write(28,0,'T31',columnHeaderBorderL)
    sheetLoopParam.write(28,1,enteredT31,parameterValue2BorderR)
    sheetLoopParam.write(29,0,'Gamma',columnHeaderBorderBL)
    sheetLoopParam.write(29,1,enteredGamma,parameterValue2BorderBB if False else parameterValue2BorderBR)
    sheetLoopParam.write(32,0," References:",notes)
    sheetLoopParam.write(33,0, Formula('HYPERLINK("http://www.ti.com/tool/pll_book";" PLL Performance Simulation and Design Handbook - 4th Edition Dean Banerjee. 2006.")'),link)
    sheetLoopParam.write(35,0, Formula('HYPERLINK("mailto:<EMAIL>";" Contact")'),linkContact)
    #Write the Noise Sources worksheet
    if noiseWorkbook == "":
        # No usable user noise file: warn inside the report and fall back to
        # the built-in default noise tables.
        sheetLoopParam.write(37,0,"***WARNING: Empty noise file or an error occurred while reading the file. Using default noise data instead.***",notesRed)
        f, XTALNoise, PFDCPNoise, PrescalerNoise, VCONoise, SDNoise = defaultNoise()
    sheetNoiseSources = book.add_sheet('Noise Sources')
    for i in range(100):
        sheetNoiseSources.row(i).set_style(whiteCell)#Make everything white first
    sheetNoiseSources.col(0).width = 6000
    sheetNoiseSources.col(1).width = 8000
    sheetNoiseSources.col(2).width = 8000
    sheetNoiseSources.col(3).width = 10000
    sheetNoiseSources.col(4).width = 8000
    sheetNoiseSources.col(5).width = 10000
    sheetNoiseSources.write(0,0,'Frequency (Hz)',columnHeaderBorderTLBR)
    sheetNoiseSources.write(0,1,'XTAL Noise (dBc/Hz)',columnHeaderBorderTLBR)
    sheetNoiseSources.write(0,2,'PFDCP Noise (dBc/Hz)',columnHeaderBorderTLBR)
    sheetNoiseSources.write(0,3,'Prescaler Noise (dBc/Hz)',columnHeaderBorderTLBR)
    sheetNoiseSources.write(0,4,'VCO Noise (dBc/Hz)',columnHeaderBorderTLBR)
    sheetNoiseSources.write(0,5,'Sigma Delta Noise (dBc/Hz)',columnHeaderBorderTLBR)
    for i in range(len(f)):
        sheetNoiseSources.write(i+1,0,f[i],parameterValue)
        sheetNoiseSources.write(i+1,1,XTALNoise[i],parameterValue2)
        sheetNoiseSources.write(i+1,2,PFDCPNoise[i],parameterValue2)
        sheetNoiseSources.write(i+1,3,PrescalerNoise[i],parameterValue2)
        sheetNoiseSources.write(i+1,4,VCONoise[i],parameterValue2)
        sheetNoiseSources.write(i+1,5,SDNoise[i],parameterValue2)
    #Write Loop Filter Components worksheet:
    sheetLoopFilter = book.add_sheet('Loop Filter Components')
    for i in range(100):
        sheetLoopFilter.row(i).set_style(whiteCell)#Make everything white first
    sheetLoopFilter.col(0).width = 4000
    sheetLoopFilter.col(1).width = 5000
    sheetLoopFilter.write(0,0,' Loop Filter Components',columnHeaderBorderTLBRAlignleft)
    # Writing None only applies the style, extending the header border to B1.
    sheetLoopFilter.write(0,1,None,columnHeaderBorderTLBRAlignleft)
    sheetLoopFilter.write(1,0,'C1',columnHeaderBorderTL)
    sheetLoopFilter.write(1,1,C1,parameterValue)
    sheetLoopFilter.write(2,0,'C2',columnHeaderBorderL)
    sheetLoopFilter.write(2,1,C2,parameterValue)
    sheetLoopFilter.write(3,0,'C3',columnHeaderBorderL)
    sheetLoopFilter.write(3,1,C3,parameterValue)
    sheetLoopFilter.write(4,0,'R2',columnHeaderBorderL)
    sheetLoopFilter.write(4,1,R2,parameterValue)
    sheetLoopFilter.write(5,0,'R3',columnHeaderBorderBL)
    sheetLoopFilter.write(5,1,R3,parameterValueBorderBRWhite)
    #Write Loop Response worksheet:
    sheetLoopResponse = book.add_sheet('Loop Response Data')
    for i in range(100):
        sheetLoopResponse.row(i).set_style(whiteCell)#Make everything white first
    sheetLoopResponse.col(0).width = 6000
    sheetLoopResponse.write(0,0,'Frequency (Hz)',columnHeaderBorderTLBR)
    sheetLoopResponse.col(1).width = 15000
    sheetLoopResponse.write(0,1,'Closed Loop Response Magnitude (dB)',columnHeaderBorderTLBR)
    sheetLoopResponse.col(2).width = 14000
    sheetLoopResponse.write(0,2,'Open Loop Response Magnitude (dB)',columnHeaderBorderTLBR)
    sheetLoopResponse.col(3).width = 14000
    # NOTE(review): header says "(dB)" but an open-loop phase is presumably
    # in degrees -- confirm against the phaseOL producer before changing.
    sheetLoopResponse.write(0,3,'Open Loop Response Phase (dB)',columnHeaderBorderTLBR)
    sheetLoopResponse.col(4).width = 14000
    sheetLoopResponse.write(0,4,'VCO Transfer Function Magnitude (dB)',columnHeaderBorderTLBR)
    for i in range(len(f)):
        sheetLoopResponse.write(i+1,0,f[i],parameterValue)
        sheetLoopResponse.write(i+1,1,magCL[i],parameterValue2)
        sheetLoopResponse.write(i+1,2,magOL[i],parameterValue2)
        sheetLoopResponse.write(i+1,3,phaseOL[i],parameterValue2)
        sheetLoopResponse.write(i+1,4,magvcoTF[i],parameterValue2)
    #Write Noise Results worksheet:
    sheetPLLNoise = book.add_sheet('Output Noise Contributors')
    for i in range(100):
        sheetPLLNoise.row(i).set_style(whiteCell)#Make everything white first
    sheetPLLNoise.col(0).width = 6000
    sheetPLLNoise.write(0,0,'Frequency (Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(1).width = 6000
    sheetPLLNoise.write(0,1,'PFDCP (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(2).width = 7000
    sheetPLLNoise.write(0,2,'Prescaler (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(3).width = 6000
    sheetPLLNoise.write(0,3,'VCO (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(4).width = 6000
    sheetPLLNoise.write(0,4,'R2 (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(5).width = 6000
    sheetPLLNoise.write(0,5,'R3 (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(6).width = 6000
    sheetPLLNoise.write(0,6,'XTAL (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(7).width = 8000
    sheetPLLNoise.write(0,7,'Sigma Delta (dBc/Hz)',columnHeaderBorderTLBR)
    sheetPLLNoise.col(8).width = 8000
    sheetPLLNoise.write(0,8,'Total Noise (dBc/Hz)',columnHeaderBorderTLBR)
    for i in range(len(f)):
        sheetPLLNoise.write(i+1,0,f[i],parameterValue)
        sheetPLLNoise.write(i+1,1,PFDCPNoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,2,PrescalerNoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,3,VCONoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,4,R2NoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,5,R3NoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,6,XTALNoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,7,SDNoiseOut[i],parameterValue2)
        sheetPLLNoise.write(i+1,8,TotalNoise[i],parameterValue2)
    #Write Time Response worksheet:
    sheetPLLTime = book.add_sheet('Time Response')
    # 2050 rows: the time vector below holds up to ~2000 samples (plus header).
    for i in range(2050):
        sheetPLLTime.row(i).set_style(whiteCell)#Make everything white first
    sheetPLLTime.col(0).width = 5000
    sheetPLLTime.write(0,0,'Time (s)',columnHeaderBorderTLBR)
    sheetPLLTime.col(1).width = 9000
    sheetPLLTime.write(0,1,'Output Frequency (Hz)',columnHeaderBorderTLBR)
    for i in range(len(t)):
        sheetPLLTime.write(i+1,0,t[i],parameterValue)
        sheetPLLTime.write(i+1,1,fT[i],parameterValue)
    #Write lock times
    sheetLockTimes = book.add_sheet('Lock Times')
    for i in range(100):
        sheetLockTimes.row(i).set_style(whiteCell)#Make everything white first
    sheetLockTimes.col(0).width = 11000
    sheetLockTimes.write(0,0,'Locks within what % error',columnHeaderBorderTLBR)
    sheetLockTimes.col(1).width = 11000
    sheetLockTimes.write(0,1,'Locks within how many Hertz',columnHeaderBorderTLBR)
    sheetLockTimes.col(2).width = 6000
    sheetLockTimes.write(0,2,'Lock Time (s)',columnHeaderBorderTLBR)
    # Tolerances are written as fractions; parameterValue3's percent format
    # renders e.g. 0.00001 as 0.001000%.
    sheetLockTimes.write(1,0,0.00001,parameterValue3)
    sheetLockTimes.write(1,1,float(scientific(0.00001*f2)),parameterValue)
    sheetLockTimes.write(1,2,lockTime_0p001Pcnt,parameterValue)
    sheetLockTimes.write(2,0,0.000001,parameterValue3)
    sheetLockTimes.write(2,1,float(scientific(0.000001*f2)),parameterValue)
    sheetLockTimes.write(2,2,lockTime_0p0001Pcnt,parameterValue)
    sheetLockTimes.write(3,0,0.0000001,parameterValue3)
    sheetLockTimes.write(3,1,float(scientific(0.0000001*f2)),parameterValue)
    sheetLockTimes.write(3,2,lockTime_0p00001Pcnt,parameterValue)
    sheetLockTimes.write(4,0,0.00000001,parameterValue3)
    sheetLockTimes.write(4,1,float(scientific(0.00000001*f2)),parameterValue)
    sheetLockTimes.write(4,2,lockTime_0p000001Pcnt,parameterValue)
    #Phase Error and Jitter worksheets
    sheetphaseError = book.add_sheet('Phase Error')
    for i in range(650):
        sheetphaseError.row(i).set_style(whiteCell)#Make everything white first
    sheetphaseError.col(0).width = 6000
    sheetphaseError.write(0,0,'Frequency (Hz)',columnHeaderBorderTLBR)
    sheetphaseError.col(1).width = 8000
    sheetphaseError.write(0,1,'Total Noise (V2/Hz)',columnHeaderBorderTLBR)
    sheetphaseError.col(2).width = 12500
    sheetphaseError.write(0,2,'Frequency Error Integrand (V2*Hz)',columnHeaderBorderTLBR)
    for i in range(len(fInterpol)):
        sheetphaseError.write(i+1,0,fInterpol[i],parameterValue)
        sheetphaseError.write(i+1,1,TotalNoiseV2HzInterpol[i],parameterValue)
        # Column C holds f^2 * S(f): the integrand used by the frequency-error
        # formula below.
        sheetphaseError.write(i+1,2,fInterpol[i]*fInterpol[i]*TotalNoiseV2HzInterpol[i],parameterValue)
    sheetphaseError.col(4).width = 12000
    sheetphaseError.write(1,4,"Lower Integration Limit (Hz)", columnHeaderBorderTL)
    sheetphaseError.write(2,4,"Upper Integration Limit (Hz)", columnHeaderBorderBL)
    sheetphaseError.write(3,4,"RMS Phase Error (degrees)", columnHeaderBorderL)
    sheetphaseError.write(4,4,"Jitter (s)", columnHeaderBorderL)
    sheetphaseError.write(5,4,"RMS Frequency Error (Hz)", columnHeaderBorderBL)
    sheetphaseError.col(5).width = 6000
    # Default integration limits; the user can edit F2/F3 in Excel and the
    # formulas below recalculate live.
    sheetphaseError.write(1,5,1.7e3,parameterValueBorderTR)
    sheetphaseError.write(2,5,200e3,parameterValueBorderBR)
    sheetphaseError.write(7,4,"Enter lower and upper integration limits to calculate RMS Phase Error",notes)
    sheetphaseError.write(8,4,"Required: (Upper Int. Limit) >= 20*(Lower Int. Limit)",notes)
    sheetphaseError.write(9,4,"Calculations made using interpolated data",notes)
    #x="(180/PI())*SQRT(2*((VLOOKUP(E3,A2:B32,1)-VLOOKUP(E2,A2:B32,1))/6)*(VLOOKUP(E2,A2:B32,2)+VLOOKUP(E3,A2:B32,2)+4*VLOOKUP(((VLOOKUP(E2,A2:B32,1)+VLOOKUP(E3,A2:B32,1))/2.0),A2:B32,2)))"
    # Excel-side composite Simpson integration: the [F2,F3] range is split
    # into 8 sub-intervals at ratios of 1.5, each integrated with Simpson's
    # rule via VLOOKUPs into the interpolated data in A2:C602.
    freqError="""SQRT(
    2*((F3-F3/1.5)/6)*(VLOOKUP(F3,A2:C602,3) + VLOOKUP(F3/1.5,A2:C602,3) + 4*VLOOKUP((F3 + F3/1.5)/2,A2:C602,3))
    + 2*((F3/1.5-F3/2.25)/6)*(VLOOKUP(F3/1.5,A2:C602,3) + VLOOKUP(F3/2.25,A2:C602,3) + 4*VLOOKUP((F3/1.5 + F3/2.25)/2,A2:C602,3))
    + 2*((F3/2.25-F3/3.375)/6)*(VLOOKUP(F3/2.25,A2:C602,3) + VLOOKUP(F3/3.375,A2:C602,3) + 4*VLOOKUP((F3/2.25 + F3/3.375)/2,A2:C602,3))
    + 2*((F3/3.375-F3/5.0625)/6)*(VLOOKUP(F3/3.375,A2:C602,3) + VLOOKUP(F3/5.0625,A2:C602,3) + 4*VLOOKUP((F3/3.375 + F3/5.0625)/2,A2:C602,3))
    + 2*((F3/5.0625-F3/7.594)/6)*(VLOOKUP(F3/5.0625,A2:C602,3) + VLOOKUP(F3/7.594,A2:C602,3) + 4*VLOOKUP((F3/5.0625 + F3/7.594)/2,A2:C602,3))
    + 2*((F3/7.594-F3/11.39)/6)*(VLOOKUP(F3/7.594,A2:C602,3) + VLOOKUP(F3/11.39,A2:C602,3) + 4*VLOOKUP((F3/7.594 + F3/11.39)/2,A2:C602,3))
    + 2*((F3/11.39-F3/17.086)/6)*(VLOOKUP(F3/11.39,A2:C602,3) + VLOOKUP(F3/17.086,A2:C602,3) + 4*VLOOKUP((F3/11.39 + F3/17.086)/2,A2:C602,3))
    + 2*((F3/17.086-F2)/6)*(VLOOKUP(F3/17.086,A2:C602,3) + VLOOKUP(F2,A2:C602,3) + 4*VLOOKUP((F3/17.086 + F2)/2,A2:C602,3))
    )"""#Take the Simpson integral over several intervals.
    phaseError="""(180/PI())*SQRT(
    2*((F2*1.5-F2)/6)*(VLOOKUP(F2*1.5,A2:C602,2) + VLOOKUP(F2,A2:C602,2) + 4*VLOOKUP((F2*1.5 + F2)/2,A2:C602,2))
    + 2*((F2*2.25-F2*1.5)/6)*(VLOOKUP(F2*2.25,A2:C602,2) + VLOOKUP(F2*1.5,A2:C602,2) + 4*VLOOKUP((F2*2.25 + F2*1.5)/2,A2:C602,2))
    + 2*((F2*3.375-F2*2.25)/6)*(VLOOKUP(F2*3.375,A2:C602,2) + VLOOKUP(F2*2.25,A2:C602,2) + 4*VLOOKUP((F2*3.375 + F2*2.25)/2,A2:C602,2))
    + 2*((F2*5.0625-F2*3.375)/6)*(VLOOKUP(F2*5.0625,A2:C602,2) + VLOOKUP(F2*3.375,A2:C602,2) + 4*VLOOKUP((F2*5.0625 + F2*3.375)/2,A2:C602,2))
    + 2*((F2*7.594-F2*5.0625)/6)*(VLOOKUP(F2*7.594,A2:C602,2) + VLOOKUP(F2*5.0625,A2:C602,2) + 4*VLOOKUP((F2*7.594 + F2*5.0625)/2,A2:C602,2))
    + 2*((F2*11.39-F2*7.594)/6)*(VLOOKUP(F2*11.39,A2:C602,2) + VLOOKUP(F2*7.594,A2:C602,2) + 4*VLOOKUP((F2*11.39 + F2*7.594)/2,A2:C602,2))
    + 2*((F2*17.086-F2*11.39)/6)*(VLOOKUP(F2*17.086,A2:C602,2) + VLOOKUP(F2*11.39,A2:C602,2) + 4*VLOOKUP((F2*17.086 + F2*11.39)/2,A2:C602,2))
    + 2*((F3-F2*17.086)/6)*(VLOOKUP(F3,A2:C602,2) + VLOOKUP(F2*17.086,A2:C602,2) + 4*VLOOKUP((F3 + F2*17.086)/2,A2:C602,2))
    )"""#Take the Simpson integral over several intervals.
    # Jitter = phase error (F4, degrees) / 360 / Fout; 'Loop Parameters'!B25
    # is the Fout cell written above.
    jitter="F4/360.0/'Loop Parameters'!B25"
    # NOTE(review): 'x' (commented above) and 'y' below look like leftover
    # prototypes of these formulas; 'y' is assigned but never written out.
    y="(180/PI())*SQRT(2*((E3-E2)/6)*(VLOOKUP(E2,A2:B32,2)+VLOOKUP(E3,A2:B32,2)+4*VLOOKUP(((E3+E2)/2.0),A2:B32,2)))"
    sheetphaseError.write(3,5,Formula(phaseError),parameterValue2BorderR)
    sheetphaseError.write(4,5,Formula(jitter),parameterValueBorderR)
    sheetphaseError.write(5,5,Formula(freqError),parameterValue2BorderBR)
    return book
def timeResponse(A2,A1,A0,T2,Fcomp,Fout,LoopBW,KVCO,CPGain):
    '''
    Compute the PLL frequency-vs-time step response and lock times.

    A 1% frequency step is applied (f1 = 0.99*Fout -> f2 = 1.01*Fout) and the
    closed-loop response is evaluated by partial-fraction expansion over the
    four closed-loop poles of A2*s^4 + A1*s^3 + A0*s^2 + K*T2*s + K,
    where K = KVCO*CPGain/N2 and N2 is the divider value at the final frequency.

    :param A2: loop-filter denominator coefficient of s^4.
    :param A1: loop-filter denominator coefficient of s^3.
    :param A0: loop-filter denominator coefficient of s^2.
    :param T2: loop-filter zero time constant (s).
    :param Fcomp: phase-detector comparison frequency (Hz).
    :param Fout: nominal output frequency (Hz).
    :param LoopBW: loop bandwidth (Hz); the simulated window is 8/LoopBW.
    :param KVCO: VCO gain (Hz/V).
    :param CPGain: charge-pump gain (A).
    :return: tuple (t, fT, lockTime_0p001Pcnt, lockTime_0p0001Pcnt,
        lockTime_0p00001Pcnt, lockTime_0p000001Pcnt, f2) with the time axis,
        the output frequency at each time, the times after which |fT - f2|
        stays below 0.001% ... 0.000001% of f2, and the final frequency.
    '''
    f1 = 0.99*Fout
    f2 = 1.01*Fout
    N2 = float(f2/Fcomp)  # Need to use N of the final frequency
    K = KVCO*CPGain/N2
    # Closed-loop denominator coefficients (descending powers) and its poles.
    denCoeff = [A2, A1, A0, K*T2, K]
    p = list(np.roots(denCoeff))
    # Partial-fraction residues: B[i] = Bconst / prod_{j != i} (p[i] - p[j]).
    # (Replaces four copy-pasted expressions in the original.)
    Bconst = K*(f2 - f1)/A2
    B = []
    for i in range(4):
        residue = Bconst
        for j in range(4):
            if j != i:
                residue *= 1/(p[i]-p[j])
        B.append(residue)

    def expComplex(alpha, beta):
        # Euler's formula: exp(alpha + 1j*beta) = exp(alpha)*(cos(beta) + 1j*sin(beta))
        return math.exp(alpha)*(math.cos(beta)+1j*math.sin(beta))

    t = np.linspace(0, 8.0/LoopBW, 2000)
    fT = []       # output frequency vs time (real part of the pole sum)
    errorfT = []  # |fT - f2| vs time
    for ti in t:
        freq = f2
        for i in range(4):
            s = p[i]*ti
            freq = freq + B[i]*expComplex(s.real, s.imag)*((1/p[i])+T2)
        fT.append(freq.real)
        errorfT.append(abs(fT[-1]-f2))

    def findError(tol, index):
        # Return (i, errorfT[i]) for the first i >= index with errorfT[i] < tol.
        # Bug fix: the original fell through with `return t, fT` (wrong types,
        # which crashed the recursive caller); when the tolerance is never met
        # within the simulated window we now report the last sample instead.
        for i in range(index, len(t)):
            if errorfT[i] < tol:
                return i, errorfT[i]
        return len(t) - 1, errorfT[-1]

    def findLockTime(tol):
        # Iterative form of the original recursion (also fixes the `errotfT`
        # parameter typo that only worked via closure capture): a candidate
        # lock index is rejected if any later error exceeds the error found
        # there, and the search restarts after that violation.
        index, error = 0, tol
        scanning = True
        while scanning:
            scanning = False
            for i in range(index, len(t)):
                if errorfT[i] > error:
                    # Candidate is bogus; find the next sub-tolerance point
                    # after the violation and re-check from there.
                    index, error = findError(tol, i + 1)
                    scanning = True
                    break
        return t[index]

    lockTime_0p001Pcnt = findLockTime((0.001/100.00)*f2)
    lockTime_0p0001Pcnt = findLockTime((0.0001/100.00)*f2)
    lockTime_0p00001Pcnt = findLockTime((0.00001/100.00)*f2)
    lockTime_0p000001Pcnt = findLockTime((0.000001/100.00)*f2)
    return t, fT, lockTime_0p001Pcnt, lockTime_0p0001Pcnt, lockTime_0p00001Pcnt, lockTime_0p000001Pcnt, f2
# Module-level Jinja2 template environment.
# Templates are loaded from the "templates" directory that sits next to this
# module; autoescape=True HTML-escapes every substituted value by default.
jinja_environment = jinja2.Environment(autoescape=True,
                                       loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
class MainHandler(webapp2.RequestHandler):
def write_form(self,Kphi="5E-3",KVCO="30E6",P="8.0",PM="50.0",LoopBW="5.1E3",Fout="900E6",Fref="200E3",R="1.0",T31="0.6",Gamma="1.136",error=""):
dictStringSubst={"Kphi": Kphi, "KVCO": KVCO, "P": P, "PM": PM, "LoopBW": LoopBW, "Fout": Fout, "Fref": Fref, "R": R, | |
# -*- coding: utf-8 -*-
import json
import os
import pickle
import random
import numpy as np
import torch
from sklearn import metrics
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
import config
from utils.utils import TriplesReader as read_triples
from utils.utils import read_relations, read_entities
from typing import Dict, Tuple, List, Set, Iterable, Any
class AverageMeter(object):
    """Track the most recent value and a running (smoothed) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=0):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        # The 1e-4 fudge term avoids division by zero while count is still 0.
        self.avg = self.sum / (.0001 + self.count)

    def __str__(self):
        """String representation for logging."""
        # Values recorded exactly (e.g. iteration numbers) print verbatim.
        return str(self.val) if self.count == 0 else '%.4f (%.4f)' % (self.val, self.avg)
def set_seed():
    """Seed every RNG (python, numpy, torch CPU and CUDA) from config.SEED
    so that runs are reproducible."""
    seed = config.SEED
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
# For each element in the batch, given the:
# - label logits,
# - gold labels, and
# - source and target entity indices of the triple,
# compute ranking metrics (P/R curve, AUC, max-F1, P@N) plus (pos-)accuracy.
def compute_metrics(logits, labels, groups, set_type, logger, ent_types=False) -> Dict[str, Any]:
    """Compute ranking-style RE metrics and classification accuracy.

    :param logits: [B * N, C] relation-classification logits (B=1 in this code).
    :param labels: [B * N] gold relation indices.
    :param groups: [B * N, 2] source/target entity indices per instance
        (this was eval['names'] originally, called from train-cli.py).
    :param set_type: "dev" selects the dev triple file; anything else test.
    :param logger: logger used to report the accuracy figures.
    :param ent_types: when True, read the entity-type variants of the
        relation/entity/triple files from config.
    :return: dict with P/R prefixes, F1, AUC, accuracy and P@N metrics.
    """
    # - eval['logits'] is [B * N, C]
    # - eval['labels'] is [B * N]
    # - eval['names'] is [B * N, G, 2] -- note that B=1 in the code
    # - eval['groups'] is [B * N, 2] -- note that B=1 in the code
    # Read relation mappings and triples
    if ent_types:
        # Get the entity-to-id and relation-to-id mappings from entities_types.txt and relations_types.txt
        rel2idx = read_relations(config.relations_file_types)
        entity2idx = read_entities(config.entities_file_types)
        if set_type == "dev":
            # triples_types_dev.tsv
            triples_file = config.triples_types_file_dev
        else:
            # triples_types_test.tsv
            # NOTE(review): this reads config.triples_file_test (the non-types
            # file) although the comment expects the types variant — looks
            # like a bug; confirm against config before changing.
            triples_file = config.triples_file_test
        # entities.txt
        # NOTE(review): this overwrites the entities_types mapping loaded a
        # few lines above, making that assignment dead — confirm intended.
        entity2idx = read_entities(config.entities_file)
    else:
        # Get the entity-to-id and relation-to-id mappings from entities.txt and relations.txt
        rel2idx = read_relations(config.relations_file)
        entity2idx = read_entities(config.entities_file)
        if set_type == "dev":
            # triples_dev.tsv
            triples_file = config.triples_file_dev
        else:
            # triples_test.tsv
            triples_file = config.triples_file_test
    # Read triples, where we have indices instead of entity names, and does not include 'na' triples
    triples: Set[Tuple[int, str, int]] = set()
    print('Loaded ', triples_file)
    # For each triple ..
    for src, rel, tgt in read_triples(triples_file):
        if rel != "na":
            # .. make sure that the relation type is not NA, and add it to 'triples'
            triples.add((entity2idx[src], rel, entity2idx[tgt]))
    # RE predictions
    # [B * N, C] softmax over the class dimension
    probas = torch.nn.Softmax(-1)(logits).squeeze()
    re_preds = list()
    # For each of the B * N instances ..
    for i in range(probas.size(0)):
        # group has shape [2]
        group = groups[i]
        # Let's get the two items from the group, ie. the source and the target entities ..
        src, tgt = group[0].item(), group[1].item()
        # For each possible relation type ..
        for rel, rel_idx in rel2idx.items():
            if rel != "na":
                # For instance i, take the probability of that relation type ..
                score = probas[i][rel_idx].item()
                # And add it to the possible predictions, in re_preds.
                # WE NEED ALL POSSIBLE PREDICTIONS BECAUSE THIS IS A RANKING TASK NOW.
                re_preds.append({
                    "src": src,
                    "tgt": tgt,
                    "relation": rel,
                    "score": score
                })
    # Adopted from:
    # https://github.com/thunlp/OpenNRE/blob/master/opennre/framework/data_loader.py#L230
    # Sort the predictions based on their scores (highest first)
    sorted_re_preds = sorted(re_preds, key=lambda x: x["score"], reverse=True)
    # Remove duplicate triples from sorted_re_preds (first, i.e. best-scoring, wins)
    sorted_re_preds = non_dup_ordered_seq(sorted_re_preds)
    P = list()
    R = list()
    correct = 0
    total = len(triples)
    # For each prediction dictionary, where duplicate (s, p, o) triples were removed ..
    for i, item in enumerate(sorted_re_preds):
        # Get the subject, predicate, and object of the triple, where for each (s, o) pair we have all possible
        # values for p ..
        relation = item["relation"]
        src, tgt = item["src"], item["tgt"]
        # If the (s, p, o) triple appears in triples ..
        if (src, relation, tgt) in triples:
            # Increment the 'correct' counter
            correct += 1
        # P = list of [nb_correct_predictions / nb_predictions so far]
        # R = list of [nb_correct_predictions / nb_triples]
        P.append(float(correct) / float(i + 1))
        R.append(float(correct) / float(total))
    # Compute AUC of the precision-recall curve, and the best F1 along it
    auc = metrics.auc(x=R, y=P)
    P = np.array(P)
    R = np.array(R)
    # 1e-20 guards against division by zero when P + R == 0
    f1 = (2 * P * R / (P + R + 1e-20)).max()
    # Added metrics: precision averaged over the top-n ranked predictions
    added_metrics = {}
    for n in range(100, 1000, 100):  # 100, 200, etc recall
        added_metrics['P@{}'.format(n)] = sum(P[:n]) / n
    for n in range(2000, total, 2000):
        added_metrics['P@{}'.format(n)] = sum(P[:n]) / n
    added_metrics['P@{}'.format(total)] = sum(P[:total]) / total
    # Accuracy
    na_idx = rel2idx["na"]
    # Get the prediction with the highest probability; (the torch.nn.Softmax here could be omitted
    # since argmax is invariant under softmax)
    preds = torch.argmax(torch.nn.Softmax(-1)(logits), -1)
    # Compute the accuracy -- agreement between predicted and gold labels
    acc = float((preds == labels).long().sum()) / labels.size(0)
    # Compute the total number of non-NA gold relations ..
    pos_total = (labels != na_idx).long().sum()
    # For all non-NA gold labels, number of times that the predicted label and the gold label match
    pos_correct = ((preds == labels).long() * (labels != na_idx).long()).sum()
    if pos_total > 0:
        # Accuracy restricted to non-NA relations
        pos_acc = float(pos_correct) / float(pos_total)
    else:
        pos_acc = 0
    logger.info("  accuracy = %s", str(acc))
    logger.info("  pos_accuracy = %s", str(pos_acc))
    # Return a dict with all the results
    # (note the trailing ": " in the accuracy keys is preserved from the original)
    results = {
        "P": list(P[:5]),
        "R": list(R[:5]),
        "F1": f1,
        "AUC": auc,
        "accuracy: ": str(acc),
        "pos_accuracy: ": str(pos_acc)
    }
    results.update(added_metrics)
    return results
def save_eval_results(results,
                      eval_dir,
                      set_type,
                      logger,
                      prefix=""):
    """Log all metrics in *results* and write them to <eval_dir>/eval_results.txt.

    Keys are emitted in sorted order, one "key = value" line each; the
    directory is created if it does not yet exist.
    """
    os.makedirs(eval_dir, exist_ok=True)
    out_path = os.path.join(eval_dir, "eval_results.txt")
    with open(out_path, "w") as handle:
        logger.info("***** {} results {} *****".format(set_type, prefix))
        for key in sorted(results):
            value = str(results[key])
            logger.info("  %s = %s", key, value)
            handle.write("%s = %s\n" % (key, value))
def load_dataset(set_type: str,
                 logger,
                 ent_types: bool = False) -> TensorDataset:
    """Load cached features for a split and wrap them in a TensorDataset.

    :param set_type: "train", "dev", or anything else (treated as test).
    :param logger: logger used to report which cache file is read.
    :param ent_types: when True, load the entity-type feature caches and
        append the converted entity-name tensor to the dataset.
    :return: TensorDataset of (input_ids, entity_ids, attention_mask,
        groups, labels[, names]).
    """
    # Pick the cached feature file for the requested split/variant.
    if ent_types:
        if set_type == "train":
            features_file = config.feats_file_types_train
        elif set_type == "dev":
            features_file = config.feats_file_types_dev
        else:
            features_file = config.feats_file_types_test
    else:
        if set_type == "train":
            features_file = config.feats_file_train
        elif set_type == "dev":
            features_file = config.feats_file_dev
        else:
            features_file = config.feats_file_test
    logger.info("Loading features from cached file %s", features_file)
    features = torch.load(features_file)
    # Stack per-example tensors into batch-first tensors.
    input_ids = torch.cat([feat["input_ids"].unsqueeze(0) for feat in features]).long()
    entity_ids = torch.cat([feat["entity_ids"].unsqueeze(0) for feat in features]).long()
    attention_mask = torch.cat([feat["attention_mask"].unsqueeze(0) for feat in features]).long()
    groups = torch.cat([torch.tensor(feat["group"]).unsqueeze(0) for feat in features]).long()
    labels = torch.tensor([feat["label"] for feat in features]).long()
    tensors = [input_ids, entity_ids, attention_mask, groups, labels]
    if ent_types:
        # include ent names within ent types
        tensors.append(convert_names_to_cuis([feat["ent_names"] for feat in features]))
    return TensorDataset(*tensors)
def convert_names_to_cuis(l_names):
    """Convert nested (src_name, tgt_name) pairs into entity-index pairs.

    :param l_names: list of bags, each bag a list of 2-item name groups.
    :return: torch.IntTensor of the same nesting with names replaced by
        their indices from config.entities_file.
    """
    entity2idx = read_entities(config.entities_file)
    converted = [
        [(entity2idx[group[0]], entity2idx[group[1]]) for group in bag]
        for bag in l_names
    ]
    return torch.IntTensor(converted)
#
# cf. https://stackoverflow.com/a/480227
#
# "seq" is a sequence of Dict where the keys are details of a given prediction,
# including the "score", the source/target entities, and the relation type.
#
def non_dup_ordered_seq(seq: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop predictions whose (src, relation, tgt) triple was already seen.

    Order is preserved: the first prediction carrying a given triple wins.
    Since callers pass predictions sorted by score (descending), the
    highest-scoring duplicate survives.
    """
    seen_triples = set()
    unique_preds: List[Dict[str, Any]] = []
    for pred in seq:
        # Identity of a prediction is its (subject, predicate, object) triple.
        triple = (pred["src"], pred["relation"], pred["tgt"])
        if triple not in seen_triples:
            seen_triples.add(triple)
            unique_preds.append(pred)
    return unique_preds
def evaluate(model, logger, set_type: str = "dev", prefix: str = "", ent_types: bool = False):
eval_output_dir = config.output_dir
# Load the dataset ...
eval_dataset: TensorDataset = load_dataset(set_type, logger, ent_types=ent_types)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
# Load the data loader ...
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=config.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", config.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
eval_logits, eval_labels, eval_preds, eval_groups, eval_dirs, eval_names = [], [], [], [], [], []
# For each batch in the dataset ...
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
# Move the batches to GPU ..
batch = tuple(t.to(config.device) for | |
tup: tup[0])
sorted_result2 = sorted(result2, key=lambda tup: tup[0])
if sorted_result1 != sorted_result2 :
return False
#**************************************************
#* Commented out *
#**************************************************
#Check AbundanceTable.__str__(self) - Commented out by GW on 2013/09/14 because
#If we import pcl file into biom file and then export to pcl, the file names might be different but the tables might be the same
#Check string representation
#if AbundanceTable.__str__(self) != AbundanceTable.__str__(objOther):
#return False
#Check if sample ids are the same and in the same order
if self.funcGetSampleNames() != objOther.funcGetSampleNames():
return False
return True
def __ne__(self, objOther):
    """Inverse of __eq__: True when the two abundance tables differ."""
    return not (self == objOther)
#Testing Status: Light happy path testing
#TODO: Tim change static to class methods
@staticmethod
def _funcTextToStructuredArray(xInputFile = None, cDelimiter = ConstantsBreadCrumbs.c_cTab, sMetadataID = None, sLastMetadataRow = None, sLastMetadata = None, ostmOutputFile = None):
    """
    Private method.
    Read a file of samples (columns) and taxa (rows) into a numpy structured array.

    :param xInputFile: File stream or path to the input file.
    :type: String or file stream.
    :param cDelimiter: Delimiter used to parse the input file.
    :type: Character.
    :param sMetadataID: ID of the metadata row (found in the first column) whose entries are used as sample IDs.
        If not given, the first row is assumed to be the sample-ID row.
    :type: String.
    :param sLastMetadataRow: ID of the last (right-most) column holding row/feature metadata.
    :type: String.
    :param sLastMetadata: ID of the metadata row that is the last one before measurement/feature rows begin.
    :type: String.
    :param ostmOutputFile: Optional output file to echo the (cleaned) input to; None writes nothing.
    :type: FileStream or String.
    :return [taxData, metadata, rowmetadata, fileMetadata]:
        - taxData: numpy structured array of abundance data.
        - metadata: dict {ID: [value, value, ...]} keyed by the first column of each metadata row,
          values in the order they are read (the order of the sample names).
        - rowmetadata: optional RowMetadata object (may be empty if none found);
          its row IDs match those of taxData.
        - fileMetadata: dict of BIOM file parameters used when converting from a BIOM file.
      Returns False on parse errors.
    """
    # Open file from a stream or file path
    # NOTE: 'rU' universal-newline mode (Python 2 era API).
    istmInput = open(xInputFile, 'rU') if isinstance(xInputFile, str) else xInputFile
    # Flag; once it holds the (positive) index of the first data row, parsing
    # switches from metadata rows to measurement rows.
    iFirstDataRow = -1
    # Sample id row
    namesRow = None
    # Row metadata names
    lsRowMetadataIDs = None
    # Index of the last row metadata
    iIndexLastMetadataRow = None
    # Holds metadata {ID:[list of values]}
    metadata = dict()
    # Holds the data measurements [(tuple of values)]
    dataMatrix = []
    # Holds row metadata { sID : [ list of values ] }
    dictRowMetadata = {}
    # Positional index
    iIndex = -1
    # File handle for the optional echo copy
    csvw = None
    # Read in files
    if ostmOutputFile:
        csvw = csv.writer(open(ostmOutputFile, 'w') if isinstance(ostmOutputFile, str) else ostmOutputFile, csv.excel_tab, delimiter = cDelimiter)
    # For each line in the file, and assume the tax id is the first element and the data follows
    for lsLineElements in csv.reader(istmInput, dialect = csv.excel_tab, delimiter = cDelimiter):
        iIndex += 1
        taxId, sampleReads = lsLineElements[0], lsLineElements[1:]
        # Read through data measurements
        # Process them as a list of tuples (needed for structured array)
        if iFirstDataRow > 0:
            try:
                # Parse the sample reads, removing row metadata and storing row metadata if it exists
                if lsRowMetadataIDs:
                    # Build expected dict for row metadata dictionary {string feature id: {'metadata': {metadatakey: [list of metadata values]}}}
                    dictFeature = dict([[sID, [sKey]] for sID, sKey in zip(lsRowMetadataIDs, sampleReads[0 : iIndexLastMetadataRow])])
                    if len(dictFeature):
                        dictRowMetadata[taxId] = {ConstantsBreadCrumbs.c_metadata_lowercase: dictFeature}
                    # Blank measurements default to 0
                    dataMatrix.append(tuple([taxId] + [(float(s) if s.strip() else 0) for s in sampleReads[iIndexLastMetadataRow:]]))
                else:
                    dataMatrix.append(tuple([taxId] + [(float(s) if s.strip() else 0) for s in sampleReads]))
            except ValueError:
                sys.stderr.write("AbundanceTable:textToStructuredArray::Error, non-numerical value on data row. File:" + str(xInputFile) +
                    " Row:" + str(lsLineElements) + "\n")
                return False
        # Go through study measurements
        else:
            # Read in metadata values, if the entry is blank then give it the default empty metadata value.
            for i, s in enumerate(sampleReads):
                if not s.strip():
                    sampleReads[i] = ConstantsBreadCrumbs.c_strEmptyDataMetadata
            # If no id metadata (sample ids) is given then the first row is assumed to be the id row, otherwise look for the id for the metadata.
            # Add the metadata to the containing dict
            if ((not sMetadataID) and (iIndex == 0)) or (taxId == sMetadataID):
                namesRow = lsLineElements
                # Remove the row metadata ids, these names are for the column ID and the samples ids
                if sLastMetadataRow:
                    iIndexLastMetadataRow = lsLineElements.index(sLastMetadataRow)
                    lsRowMetadataIDs = namesRow[1 : iIndexLastMetadataRow + 1]
                    namesRow = [namesRow[0]] + namesRow[iIndexLastMetadataRow + 1:]
                    # If the sample metadata dictionary already has entries then remove the row metadata info from it.
                    if len(metadata) and len(lsRowMetadataIDs):
                        for sKey, lsValues in metadata.items():
                            metadata[sKey] = lsValues[iIndexLastMetadataRow:]
            # Set the metadata without row metadata entries
            metadata[taxId] = sampleReads[iIndexLastMetadataRow:] if (lsRowMetadataIDs and len(lsRowMetadataIDs)) else sampleReads
            # If the last metadata was just processed switch to data processing
            # If the last metadata name is not given it is assumed that there is only one metadata
            if (not sLastMetadata) or (taxId == sLastMetadata):
                iFirstDataRow = iIndex + 1
        # If writing out the data write back out the line read in.
        # This happens at the end so that the above cleaning is captured and written.
        if csvw:
            csvw.writerow([taxId] + sampleReads)
    if sLastMetadata and (not dataMatrix):
        sys.stderr.write("AbundanceTable:textToStructuredArray::Error, did not find the row for the last metadata ID. File:" + str(xInputFile) +
            " Identifier:" + sLastMetadata + "\n")
        return False
    # Make sure the names are found
    if namesRow == None:
        sys.stderr.write("AbundanceTable:textToStructuredArray::Error, did not find the row for the unique sample/column. File:" + str(xInputFile) +
            " Identifier:" + sMetadataID + "\n")
        return False
    # Now we know the longest taxId we can define the first column holding the tax id
    # Gross requirement of Numpy structured arrays, a = ASCII followed by max # of characters (as a string)
    longestTaxId = max(len(a[0]) for a in dataMatrix)
    dataTypeVector = [(namesRow[0], 'a' + str(longestTaxId * 2))] + [(s, "f4") for s in namesRow[1:]]
    # Create structured array
    taxData = np.array(dataMatrix, dtype = np.dtype(dataTypeVector))
    # Feature (row) metadata may be empty because the PCL file specification this
    # originally worked on did not have feature metadata; can be updated in the future.
    # [Data (structured array), column metadata (dict), row metadata (RowMetadata), file metadata (dict)]
    return [taxData, metadata, RowMetadata(dictRowMetadata = dictRowMetadata, lsRowMetadataIDs = lsRowMetadataIDs), {
        ConstantsBreadCrumbs.c_strIDKey: ConstantsBreadCrumbs.c_strDefaultPCLID,
        ConstantsBreadCrumbs.c_strDateKey: str(date.today()),
        ConstantsBreadCrumbs.c_strFormatKey: ConstantsBreadCrumbs.c_strDefaultPCLFileFormateType,
        ConstantsBreadCrumbs.c_strSourceKey: ConstantsBreadCrumbs.c_strDefaultPCLGenerationSource,
        ConstantsBreadCrumbs.c_strTypekey: ConstantsBreadCrumbs.c_strDefaultPCLFileTpe,
        ConstantsBreadCrumbs.c_strURLKey: ConstantsBreadCrumbs.c_strDefaultPCLURL,
        ConstantsBreadCrumbs.c_strSparsityKey: ConstantsBreadCrumbs.c_fDefaultPCLSparsity}]
# def funcAdd(self,abndTwo,strFileName=None):
# """
# Allows one to add an abundance table to an abundance table. They both must be the same state of normalization or summation
# or they will be summed or normalized if one of the two are.
#
# :param abndTwo: AbundanceTable object 2
# :type: AbundanceTable
# :return AbudanceTable:
# """
#
# #Check summation and normalization
# if(self.funcIsSummed() or abndTwo.funcIsSummed()):
# self.funcSum()
# abndTwo.funcSum()
# if(self.funcIsNormalized() or abndTwo.funcIsNormalized()):
# self.funcNormalize()
# abndTwo.funcNormalize()
#
# #Normalize Feature names
# #Get if the abundance tables have clades
# fAbndInputHasClades = self.funcHasFeatureHierarchy()
# fAbndCompareHasClades = abndTwo.funcHasFeatureHierarchy()
#
# if(fAbndInputHasClades or fAbndCompareHasClades):
# #If feature delimiters do not match, switch
# if not self.funcGetFeatureDelimiter() == abndTwo.funcGetFeatureDelimiter():
# abndTwo.funcSetFeatureDelimiter(self.funcGetFeatureDelimiter())
#
# #Add prefixes if needed.
# self.funcAddCladePrefixToFeatures()
# abndTwo.funcAddCladePrefixToFeatures()
#
# #Get feature Names
# lsFeatures1 = self.funcGetFeatureNames()
# lsFeatures2 = abndTwo.funcGetFeatureNames()
#
# #Make one | |
Alexa global
'http://www.virtapay.com/',
# Why: #3038 in Alexa global
'http://www.jobdiagnosis.com/',
# Why: #3039 in Alexa global
'http://guokr.com/',
# Why: #3040 in Alexa global
'http://www.clickpoint.it/',
# Why: #3041 in Alexa global
'http://3dmgame.com/',
# Why: #3042 in Alexa global
'http://www.ashleymadison.com/',
# Why: #3043 in Alexa global
'http://www.utsprofitads.com/',
# Why: #3044 in Alexa global
'http://www.google.ee/',
# Why: #3045 in Alexa global
'http://www.365jia.cn/',
# Why: #3046 in Alexa global
'http://www.oyunskor.com/',
# Why: #3047 in Alexa global
'http://www.metro.co.uk/',
# Why: #3048 in Alexa global
'http://www.ebaumsworld.com/',
# Why: #3049 in Alexa global
'http://www.realsimple.com/',
# Why: #3050 in Alexa global
'http://www.3file.info/',
# Why: #3051 in Alexa global
'http://www.xcams.com/',
# Why: #3052 in Alexa global
'http://www.cyberforum.ru/',
# Why: #3053 in Alexa global
'http://www.babble.com/',
# Why: #3054 in Alexa global
'http://www.lidl.de/',
# Why: #3055 in Alexa global
'http://www.pixer.mobi/',
# Why: #3056 in Alexa global
'http://www.yell.com/',
# Why: #3057 in Alexa global
'http://www.alnilin.com/',
# Why: #3058 in Alexa global
'http://www.lurkmore.to/',
# Why: #3059 in Alexa global
'http://www.olx.co.za/',
# Why: #3060 in Alexa global
'http://www.eorezo.com/',
# Why: #3061 in Alexa global
'http://www.baby.ru/',
# Why: #3062 in Alexa global
'http://www.xdf.cn/',
# Why: #3063 in Alexa global
'http://www.redporntube.com/',
# Why: #3064 in Alexa global
'http://www.extabit.com/',
# Why: #3065 in Alexa global
'http://www.wayn.com/',
# Why: #3066 in Alexa global
'http://www.gaana.com/',
# Why: #3067 in Alexa global
'http://www.islamicfinder.org/',
# Why: #3068 in Alexa global
'http://www.venturebeat.com/',
# Why: #3069 in Alexa global
'http://www.played.to/',
# Why: #3070 in Alexa global
'http://www.alrakoba.net/',
# Why: #3071 in Alexa global
'http://www.mouthshut.com/',
# Why: #3072 in Alexa global
'http://www.banquepopulaire.fr/',
# Why: #3073 in Alexa global
'http://www.jal.co.jp/',
# Why: #3074 in Alexa global
'http://www.dasoertliche.de/',
# Why: #3075 in Alexa global
'http://www.1stwebdesigner.com/',
# Why: #3076 in Alexa global
'http://www.tam.com.br/',
# Why: #3077 in Alexa global
'http://www.nature.com/',
# Why: #3078 in Alexa global
'http://www.camfrog.com/',
# Why: #3079 in Alexa global
'http://www.philly.com/',
# Why: #3080 in Alexa global
'http://www.zemtv.com/',
# Why: #3081 in Alexa global
'http://www.oprah.com/',
# Why: #3082 in Alexa global
'http://www.wmaraci.com/',
# Why: #3083 in Alexa global
'http://www.ruvr.ru/',
# Why: #3084 in Alexa global
'http://www.gsn.com/',
# Why: #3085 in Alexa global
'http://www.acrobat.com/',
# Why: #3086 in Alexa global
'http://www.depositfiles.org/',
# Why: #3087 in Alexa global
'http://www.smartresponder.ru/',
# Why: #3088 in Alexa global
'http://www.huxiu.com/',
# Why: #3089 in Alexa global
'http://www.porn-wanted.com/',
# Why: #3090 in Alexa global
'http://www.tripadvisor.fr/',
# Why: #3091 in Alexa global
'http://3366.com/',
# Why: #3092 in Alexa global
'http://www.ranker.com/',
# Why: #3093 in Alexa global
'http://www.cibc.com/',
# Why: #3094 in Alexa global
'http://www.trend.az/',
# Why: #3095 in Alexa global
'http://www.whatsapp.com/',
# Why: #3096 in Alexa global
'http://07073.com/',
# Why: #3097 in Alexa global
'http://www.netload.in/',
# Why: #3098 in Alexa global
'http://www.channel4.com/',
# Why: #3099 in Alexa global
'http://www.yatra.com/',
# Why: #3100 in Alexa global
'http://www.elconfidencial.com/',
# Why: #3101 in Alexa global
'http://www.labnol.org/',
# Why: #3102 in Alexa global
'http://www.google.co.ke/',
# Why: #3103 in Alexa global
'http://www.disneylatino.com/',
# Why: #3104 in Alexa global
'http://www.pconverter.com/',
# Why: #3105 in Alexa global
'http://www.cqnews.net/',
# Why: #3106 in Alexa global
'http://www.blog.co.uk/',
# Why: #3107 in Alexa global
'http://www.immowelt.de/',
# Why: #3108 in Alexa global
'http://www.crunchyroll.com/',
# Why: #3109 in Alexa global
'http://www.gamesgames.com/',
# Why: #3110 in Alexa global
'http://www.protothema.gr/',
# Why: #3111 in Alexa global
'http://www.vmoptions.com/',
# Why: #3112 in Alexa global
'http://www.go2jump.org/',
# Why: #3113 in Alexa global
'http://www.psu.edu/',
# Why: #3114 in Alexa global
'http://www.sanjesh.org/',
# Why: #3115 in Alexa global
'http://www.sportingnews.com/',
# Why: #3116 in Alexa global
'http://www.televisionfanatic.com/',
# Why: #3117 in Alexa global
'http://www.fansshare.com/',
# Why: #3118 in Alexa global
'http://www.xcams4u.com/',
# Why: #3119 in Alexa global
'http://www.dict.cn/',
# Why: #3120 in Alexa global
'http://www.madthumbs.com/',
# Why: #3121 in Alexa global
'http://www.ebates.com/',
# Why: #3122 in Alexa global
'http://www.eromon.net/',
# Why: #3123 in Alexa global
'http://www.copyblogger.com/',
# Why: #3124 in Alexa global
'http://www.flirt4free.com/',
# Why: #3125 in Alexa global
'http://www.gaytube.com/',
# Why: #3126 in Alexa global
'http://www.notdoppler.com/',
# Why: #3127 in Alexa global
'http://www.allmyvideos.net/',
# Why: #3128 in Alexa global
'http://www.cam4.de.com/',
# Why: #3129 in Alexa global
'http://www.chosun.com/',
# Why: #3130 in Alexa global
'http://www.adme.ru/',
# Why: #3131 in Alexa global
'http://www.codeplex.com/',
# Why: #3132 in Alexa global
'http://www.jumia.com.ng/',
# Why: #3133 in Alexa global
'http://www.digitaltrends.com/',
# Why: #3134 in Alexa global
'http://www.b92.net/',
# Why: #3135 in Alexa global
'http://www.miniinthebox.com/',
# Why: #3136 in Alexa global
'http://www.radaronline.com/',
# Why: #3137 in Alexa global
'http://www.hujiang.com/',
# Why: #3138 in Alexa global
'http://www.gardenweb.com/',
# Why: #3139 in Alexa global
'http://www.pizap.com/',
# Why: #3140 in Alexa global
'http://www.iptorrents.com/',
# Why: #3141 in Alexa global
'http://www.yuku.com/',
# Why: #3142 in Alexa global
'http://www.mega-giochi.it/',
# Why: #3143 in Alexa global
'http://www.nrk.no/',
# Why: #3144 in Alexa global
'http://www.99designs.com/',
# Why: #3145 in Alexa global
'http://www.uscis.gov/',
# Why: #3146 in Alexa global
'http://www.lostfilm.tv/',
# Why: #3147 in Alexa global
'http://www.mileroticos.com/',
# Why: #3148 in Alexa global
'http://www.republika.co.id/',
# Why: #3149 in Alexa global
'http://www.sharethis.com/',
# Why: #3150 in Alexa global
'http://www.samplicio.us/',
# Why: #3151 in Alexa global
'http://www.1saleaday.com/',
# Why: #3152 in Alexa global
'http://www.vonelo.com/',
# Why: #3153 in Alexa global
'http://www.oyunmoyun.com/',
# Why: #3154 in Alexa global
'http://www.flightradar24.com/',
# Why: #3155 in Alexa global
'http://www.geo.tv/',
# Why: #3156 in Alexa global
'http://www.nexusmods.com/',
# Why: #3157 in Alexa global
'http://www.mizuhobank.co.jp/',
# Why: #3158 in Alexa global
'http://www.blogspot.fi/',
# Why: #3159 in Alexa global
'http://www.directtrack.com/',
# Why: #3160 in Alexa global
'http://www.media.net/',
# Why: #3161 in Alexa global
'http://www.bigresource.com/',
# Why: #3162 in Alexa global
'http://www.free-lance.ru/',
# Why: #3163 in Alexa global
'http://www.loveplanet.ru/',
# Why: #3164 in Alexa global
'http://www.ilfattoquotidiano.it/',
# Why: #3165 in Alexa global
'http://www.coolmovs.com/',
# Why: #3166 in Alexa global
'http://www.mango.com/',
# Why: #3167 in Alexa global
'http://www.nj.com/',
# Why: #3168 in Alexa global
'http://www.magazineluiza.com.br/',
# Why: #3169 in Alexa global
'http://www.datehookup.com/',
# Why: #3170 in Alexa global
'http://www.registro.br/',
# Why: #3171 in Alexa global
'http://www.debenhams.com/',
# Why: #3172 in Alexa global
'http://www.jqueryui.com/',
# Why: #3173 in Alexa global
'http://www.palcomp3.com/',
# Why: #3174 in Alexa global
'http://www.opensubtitles.org/',
# Why: #3175 in Alexa global
'http://www.socialmediatoday.com/',
# Why: #3176 in Alexa global
'http://3158.cn/',
# Why: #3178 in Alexa global
'http://www.allgameshome.com/',
# Why: #3179 in Alexa global
'http://www.pricegrabber.com/',
# Why: #3180 in Alexa global
'http://www.lufthansa.com/',
# Why: #3181 in Alexa global
'http://www.ip-adress.com/',
# Why: #3182 in Alexa global
'http://www.business-standard.com/',
# Why: #3183 in Alexa global
'http://www.games.com/',
# Why: #3184 in Alexa global
'http://www.zaman.com.tr/',
# Why: #3185 in Alexa global
'http://www.jagranjosh.com/',
# Why: #3186 in Alexa global
'http://www.mint.com/',
# Why: #3187 in Alexa global
'http://www.gorillavid.in/',
# Why: #3188 in Alexa global
'http://www.google.com.om/',
# Why: #3189 in Alexa global
'http://www.blogbigtime.com/',
# Why: #3190 in Alexa global
'http://www.books.com.tw/',
# Why: #3191 in Alexa global
'http://www.korrespondent.net/',
# Why: #3192 in Alexa global
'http://www.nymag.com/',
# Why: #3193 in Alexa global
'http://www.proporn.com/',
# Why: #3194 in Alexa global
'http://ycasmd.info/',
# Why: #3195 in Alexa global
'http://www.persiantools.com/',
# Why: #3196 in Alexa global
'http://www.torrenthound.com/',
# Why: #3197 in Alexa global
'http://www.bestsexo.com/',
# Why: #3198 in Alexa global
'http://www.alwatanvoice.com/',
# Why: #3199 in Alexa global
'http://www.jahannews.com/',
# Why: #3200 in Alexa global
'http://www.bluewin.ch/',
# Why: #3201 in Alexa global
'http://www.sap.com/',
# Why: #3203 in Alexa global
'http://www.rzb.ir/',
# Why: #3204 in Alexa global
'http://www.myorderbox.com/',
# Why: #3205 in Alexa global
'http://www.dealsandsavings.net/',
# Why: #3206 in Alexa global
'http://www.goldenline.pl/',
# Why: #3207 in Alexa global
'http://www.stuff.co.nz/',
# Why: #3208 in Alexa global
'http://www.opentable.com/',
# Why: #3209 in Alexa global
'http://www.4738.com/',
# Why: #3210 in Alexa global
'http://www.freshersworld.com/',
# Why: #3211 in Alexa global
'http://www.state.pa.us/',
# Why: #3212 in Alexa global
'http://www.lavanguardia.com/',
# Why: #3213 in Alexa global
'http://www.sudu.cn/',
# Why: #3214 in Alexa global
'http://www.mob.org/',
# Why: #3215 in Alexa global
'http://www.vodafone.in/',
# Why: #3216 in Alexa global
'http://www.blogdetik.com/',
# Why: #3217 in Alexa global
'http://www.888.it/',
# Why: #3218 in Alexa global
'http://www.passportindia.gov.in/',
# Why: #3219 in Alexa global
'http://www.ssa.gov/',
# Why: #3220 in Alexa global
'http://www.desitvforum.net/',
# Why: #3221 in Alexa global
'http://www.8684.cn/',
# Why: #3222 | |
start = [0.,0.,0.]
start[cmapc] = ncstart
start[cmapr] = nrstart
start[cmaps] = nsstart
#print 'start', start
origin = [0.,0.,0.]
origin[cmapc] = (start[cmapc]-center[cmapc])/float(dims[cmapc])
origin[cmapr] = (start[cmapr]-center[cmapr])/float(dims[cmapr])
origin[cmaps] = (start[cmaps]-center[cmaps])/float(dims[cmaps])
h['origin'] = origin
#print 'origin', origin
stepSize = [0.,0.,0.]
stepSize[cmapc] = 1./float(dims[cmapc])
stepSize[cmapr] = 1./float(dims[cmapr])
stepSize[cmaps] = 1./float(dims[cmaps])
#print 'stepSize', stepSize
crystal = Crystal( (acell, bcell, ccell), (alpha, beta, gamma))
g = gtype( self.data, origin, stepSize, h, crystal)
h['dimensions'] = str(self.data.shape)
if disp_out is True:
print self.describe(h)
print 'time to read file: ', time()-t1
return g
import re
import array
class ReadCNS(VolumeReaderBase):
"""Read a CNS/XPLOR file and return a Grid3D object"""
def read(self, filename, disp_out=True, normalize=True):
t1 = time()
sw = "" # used in format to swap bytes if necessary
self.filename = filename
h = self.header = {}
h['mapType'] = 'CNS/XPLOR'
h['filename'] = filename
# open file to read in ascii mode
f = open(filename, 'r')
numOfLines=len(f.readlines())
f.close
# read the header lines
# content = f.readlines()
count = 0
f = open(filename, 'r')
cns_file_error = False
# determine location of cell edges in header
i=0
while i < numOfLines:
line=f.readline()
# gather grid information
if re.search("^(\s+-?\d+){9}", line):
edges=re.split("\s+", line)
na=int(edges[1])
amin=int(edges[2])
amax=int(edges[3])
nb=int(edges[4])
bmin=int(edges[5])
bmax=int(edges[6])
nc=int(edges[7])
cmin=int(edges[8])
cmax=int(edges[9])
# calculate edge lengths
alen, blen, clen = (amax-amin+1, bmax-bmin+1, cmax-cmin+1)
break
i+=1
if i==numOfLines: cns_file_error=True
# gather crystal cell dimensions
cryst_cell=f.readline()
if re.search("^([\s-]\d.+){6}",cryst_cell):
acell=float(cryst_cell[:12])
bcell=float(cryst_cell[12:24])
ccell=float(cryst_cell[24:36])
alpha=float(cryst_cell[36:48])
beta=float(cryst_cell[48:60])
gamma=float(cryst_cell[60:72])
#compute interval step size
sx=1./(na-1)
sy=1./(nb-1)
sz=1./(nc-1)
else: cns_file_error=True
# the following line should contain "ZYX"
if not re.search("ZYX",f.readline()): cns_file_error=True
if cns_file_error is True:
print "ReadCNS: ERROR: %s is not a valid CNS/XPLOR file"%filename
return
h.update({'nc':alen, 'nr':blen, 'ns':clen, 'mode':2})
h.update({'mapc':1, 'mapr':2,'maps':3})
h.update({'ncstart':amin, 'nrstart':bmin, 'nsstart':cmin })
h.update({'acell':acell, 'bcell':bcell, 'ccell':ccell})
h.update({'alpha':alpha, 'beta':beta, 'gamma':gamma})
# allocate array for density
ndata = numpy.zeros((alen, blen, clen), 'f')
# loop over sections, then lines, then columns and read
# a new line everytime we read 6 float (each stored in 12 charaters)
# after each section (i.e. XY plane) there is a 1 line section header
for z in range(clen):
# skip section header line
line = f.readline()
# read first line
line = f.readline()
ci = 0 # initialize character counter on this line
for y in range(blen):
if ci==72: # if we read all the float
line = f.readline() # read next line
ci = 0 # and reset character pointer
for x in range(alen):
if ci==72: # if we read all the float
line = f.readline() # read next line
ci = 0 # and reset character pointer
# put the denity intothe array
ndata[x,y,z] = float(line[ci:ci+12])
ci+=12 # increment the character pointer
self.data = ndata
line = f.readline() # footer - int value must be -9999
assert float(line)==-9999, "penultimate line should hold -9999', got %d"%line
line = f.readline() # density average and standard dev
f.close()
mean, stddev = map(float, line.split())
h['amean'] = mean
h['arms'] = stddev
dims = (alen, blen, clen)
#print 'dims', dims
start = [0.,0.,0.]
start[0] = amin
start[1] = bmin
start[2] = cmin
#print 'start', start
origin = [amin*sx, bmin*sy, cmin*sz]
h['origin'] = origin
#print 'origin', origin
stepSize = [sx,sy,sz]
size, unpackType, arraytype, gtype = self.getDataElemSize(2)
crystal = Crystal( (acell, bcell, ccell), (alpha, beta, gamma))
g = gtype(self.data, origin, stepSize, h, crystal)
# compute min, max, mean rms
amin, amax, amean, arms = g.stats()
h.update( {'amin':amin, 'amax':amax, 'amean':amean, 'arms':arms })
h['dimensions'] = str(self.data.shape)
if disp_out is True:
print self.describe(h)
print 'time: ', time()-t1
return g
def info(self):
txt = """http://www.sinica.edu.tw/~scimath/msi/xplor981/formats.html
The X-PLOR map file begins with an eight-line header.
1. Line 1
An empty line written by the `/ ` FORTRAN format
descriptor in the formatted map file.
2. Lines 2- 5
Title information written as character strings.
These lines are written as 80-character strings
in the formatted file map.
3. Line 6
A series of nine integers:
NA, AMIN, AMAX, NB, BMIN, BMAX, NC, CMIN, CMAX
The values NA, NB and NC indicate the total number
of grid points along the a,b, and c cell edges.
The items AMIN, AMAX, BMIN, BMAX, CMIN, CMAX
indicate the starting and stopping grid points
along each cell edge in the portion of the map that
is written. In the formatted map file this line is
written using the FORTRAN format statement (9I8).
4. Line 7
A series of six double-precision items corresponding to
the crystal cell dimensions a, b, c, alpha, beta, gamma.
In the formatted map file these items are written using
the FORTRAN format statement (6E12.5).
5. Line 8
A three-letter character string which always reads `ZXY'.
"""
print txt
class ReadGRD(VolumeReaderBase):
    """Read a GRD file and return a Grid3D object"""

    def read(self, filename, normalize=True, disp_out=True):
        """Parse the binary GRD file *filename* and return a Grid3D object.

        disp_out  -- if True, print the header description and timing
        normalize -- unused here; kept for interface compatibility
        Returns None if the file is not a readable GRD file.
        """
        t1 = time()
        sw = ""  # prepended to struct format strings to swap bytes if necessary
        self.filename = filename
        # open file to read in binary mode
        f = open(filename, 'rb')
        # read the 512 bytes of the header
        data = f.read(512)
        h = self.header = {}
        h['mapType'] = 'GRD'
        h['filename'] = filename
        file_num, processor, mode = unpack("3i", data[:12])
        # Determine byte order from the 'processor' field, which must be
        # 1-3 when read with the correct endianness.
        # first determine if byte order is big-endian
        if processor not in (1,2,3):
            sw = ">"
            file_num, processor, mode = unpack(sw+"3i", data[:12])
        # if not big-endian, check if little-endian
        if processor not in (1,2,3):
            sw = "<"
            file_num, processor, mode = unpack(sw+"3i", data[:12])
        # else this is an unreadable GRD file
        if processor not in (1,2,3):
            f.close()
            print "ReadGRD: ERROR: %s is not a valid GRD file"%filename
            return
        # header records 3-10: data offset, grid extents, axis ordering
        offset, nc, nr, ns = unpack(sw+"4i", data[12:28])
        cmapc, cmapr, cmaps = unpack(sw+"3i", data[28:40])
        # 1-based axis indices (cmapc/cmapr/cmaps are 0-based)
        mapc = cmapc+1
        mapr = cmapr+1
        maps = cmaps+1
        # remainder of the 512-byte header; currently unused
        xtra = unpack(sw+"472c", data[40:512])
        h.update( {'file_num':file_num,'processor':processor })
        h.update( {'mode':mode })
        h.update( {'offset':offset,'nc':nc,'nr':nr,'ns':ns })
        h.update( {'mapc':mapc,'mapr':mapr,'maps':maps })
        h.update( {'cmapc':cmapc,'cmapr':cmapr,'cmaps':cmaps })
        # element size / struct code / numpy typecode / grid class for 'mode'
        size, unpackType, arraytype, gtype = self.getGRDElemSize(mode)
        data = f.read( )  # read to end
        f.close()
        # unpack exactly nc*nr*ns values of the declared element type
        ndata = numpy.array( unpack(sw+"%d%c"%(nc*nr*ns,unpackType),
                                    data[:nc*nr*ns*size]), arraytype )
        self.data = ndata
        if (mapc==1 and mapr==2 and maps==3): #fortran style x-fastest
            ndata.shape = (ns,nr,nc)
            #transpose the scalar data due to FORTRAN style
            self.data = numpy.ascontiguousarray(numpy.transpose(ndata), ndata.dtype.char)
        elif (mapc==3 and mapr==2 and maps==1): #C style z-fastest
            ndata.shape = (ns,nr,nc)
        else:
            # generic axis ordering: scatter values one at a time into an
            # array shaped according to the declared axis permutation
            dims = [0,0,0]
            dims[cmapc] = nc
            dims[cmapr] = nr
            dims[cmaps] = ns
            nndata = numpy.zeros( dims, 'f')
            cc = [0,0,0]   # destination index, permuted per element
            l = 0          # linear index into the flat source data
            for i in range(ns):
                cc[cmaps] = i
                for j in range(nr):
                    cc[cmapr] = j
                    for k in range(nc):
                        cc[cmapc] = k
                        val = ndata.flat[l]
                        nndata[cc[0], cc[1], cc[2]] = val
                        l += 1
            self.data = nndata
        dims = (nc, nr, ns)
        # GRD carries no origin/spacing information; use defaults
        origin = [0.,0.,0.]
        stepSize = [1.,1.,1.]
        h['origin'] = origin
        g = gtype( self.data, origin, stepSize, h)
        # compute min, max, mean rms
        amin, amax, amean, arms = g.stats()
        h.update( {'amin':amin, 'amax':amax, 'amean':amean, 'arms':arms })
        h['dimensions'] = str(self.data.shape)
        if disp_out is True:
            print self.describe(h)
            print 'time: ', time()-t1
        return g

    def info(self):
        """Print a short description of the GRD header layout."""
        txt = """
        GRD is a general purpose grid file format developed by <NAME> and
        <NAME> at the M.E. Mueller Institute, Basel. It contains a
        default 512 byte header containing 64 four byte records. Only
        header records 0-10 are occupied so far.
        [0] magic file number
        [1] processor where the file was generated:
            1 = Data_from_VAX
            2 = Data_from_MIPS
            3 = Data_from_Convex
        [2] data record format:
            0 = Data_in_free_fromat
            1 = Data_in_u_char
            2 = Data_in_char
            3 = Data_in_u_short
            4 = Data_in_short
            5 = Data_in_u_int
            6 = Data_in_int
            7 = Data_in_u_long
            8 = Data_in_long
            9 = Data_in_float
            10 = Data_in_double
        [3] offset of data section after default header
        [4] number of columns
        [5] number of rows
        [6] number of sections
        [8] fastest coordinate
        [9] medium coordinate
        [10] slowest coordinate
        The rest of the header section is unused.
        """
        print txt
class ReadBRIX(VolumeReaderBase):
"""Read a BRIX or MAPPAGE (DSN6) crystallographic density
map used by O, and return a Grid3D object"""
def read(self, filename, normalize=True, disp_out=True):
t1 = | |
of visit_block, without opening {
self.indent += 1
block = o.body
self._write_body(block.body)
self.indent -= 1
self.write_ind('}\n')
if o.else_body:
raise AssertionError("can't translate for-else")
def _write_cases(self, if_node):
    """Emit 'case X:' labels plus the shared body for one if/elif branch.

    MyPy represents if/elif/elif as *nested* IfStmt nodes rather than a
    flat list, so we recurse into else_body until the final else, which
    becomes the 'default:' label.
    """
    assert isinstance(if_node, IfStmt), if_node
    assert len(if_node.expr) == 1, if_node.expr
    assert len(if_node.body) == 1, if_node.body

    predicate = if_node.expr[0]
    case_body = if_node.body[0]

    # case 1:
    # case 2:
    # case 3: {
    #   print('body')
    # }
    #   break;  // this indent is annoying but hard to get rid of
    assert isinstance(predicate, CallExpr), predicate
    for arg_index, case_value in enumerate(predicate.args):
        if arg_index:
            self.write('\n')
        self.write_ind('case ')
        self.accept(case_value)
        self.write(': ')

    self.accept(case_body)
    self.write_ind(' break;\n')

    else_block = if_node.else_body
    if else_block:
        nested = else_block.body[0]
        if isinstance(nested, IfStmt):
            # elif: another run of case labels
            self._write_cases(nested)
        else:
            # final else ends the recursion; no break emitted
            self.write_ind('default: ')
            self.accept(else_block)  # the whole block
def _write_switch(self, expr, o):
    """Translate `with switch(x)` into a C++ `switch` over integers."""
    assert len(expr.args) == 1, expr.args

    self.write_ind('switch (')
    self.accept(expr.args[0])
    self.write(') {\n')

    # The with-body must be a single if/elif/else chain; each branch
    # becomes one or more case labels.
    statements = o.body.body
    assert len(statements) == 1, statements
    cases_root = statements[0]
    assert isinstance(cases_root, IfStmt), cases_root

    self.indent += 1
    self._write_cases(cases_root)
    self.indent -= 1

    self.write_ind('}\n')
def _write_typeswitch(self, expr, o):
    """Translate `with tagswitch(x)` into a C++ `switch` over ASDL tags."""
    assert len(expr.args) == 1, expr.args

    self.write_ind('switch (')
    self.accept(expr.args[0])
    self.write('->tag_()) {\n')

    # The with-body must be a single if/elif/else chain; each branch
    # becomes one or more case labels.
    statements = o.body.body
    assert len(statements) == 1, statements
    cases_root = statements[0]
    assert isinstance(cases_root, IfStmt), cases_root

    self.indent += 1
    self._write_cases(cases_root)
    self.indent -= 1

    self.write_ind('}\n')
def visit_with_stmt(self, o: 'mypy.nodes.WithStmt') -> T:
    """
    Translate only blocks of this form:

    with switch(x) as case:
      if case(0):
        print('zero')
      elif case(1, 2, 3):
        print('low')
      else:
        print('other')

    switch(x) {
      case 0:
        # TODO: need casting here
        print('zero')
        break;
      case 1:
      case 2:
      case 3:
        print('low')
        break;
      default:
        print('other')
        break;
    }

    Or:

    with ctx_Bar(bar, x, y):
      x()

    {
      ctx_Bar(bar, x, y)
      x();
    }
    """
    assert len(o.expr) == 1, o.expr
    expr = o.expr[0]
    assert isinstance(expr, CallExpr), expr

    if expr.callee.name == 'switch':
        self._write_switch(expr, o)
    elif expr.callee.name == 'tagswitch':
        self._write_typeswitch(expr, o)
    else:
        # General context manager: open a C++ scope and construct a local
        # 'ctx' object whose destructor runs at the closing brace.
        # (Removed a duplicated `assert isinstance(expr, CallExpr)` that was
        # already checked above.)
        self.write_ind('{ // with\n')
        self.indent += 1

        self.write_ind('')
        self.accept(expr.callee)
        self.write(' ctx(')
        for i, arg in enumerate(expr.args):
            if i != 0:
                self.write(', ')
            self.accept(arg)
        self.write(');\n\n')

        self._write_body(o.body.body)

        self.indent -= 1
        self.write_ind('}\n')
def visit_del_stmt(self, o: 'mypy.nodes.DelStmt') -> T:
    """Translate `del` statements.

    del mylist[:]     ->  mylist->clear()
    del mydict[mykey] ->  mydict->remove(key)

    Anything else is silently ignored (emits no C++).
    """
    target = o.expr
    if not isinstance(target, IndexExpr):
        return

    self.write_ind('')
    self.accept(target.base)

    if isinstance(target.index, SliceExpr):
        # Only the full slice [:] is supported.
        full_slice = target.index
        assert full_slice.begin_index is None, full_slice
        assert full_slice.end_index is None, full_slice
        self.write('->clear()')
    else:
        self.write('->remove(')
        self.accept(target.index)
        self.write(')')

    self.write(';\n')
def _WriteFuncParams(self, arg_types, arguments):
    """Write the C++ parameter list for a function/method signature.

    arg_types -- MyPy types of the parameters
    arguments -- MyPy Argument nodes (for names)

    Skips 'self' because C++ has an implicit 'this'.
    (Removed an `if 0:` dead debug-logging block.)
    """
    first = True  # first NOT including self
    for arg_type, arg in zip(arg_types, arguments):
        if not first:
            self.decl_write(', ')

        # TODO: Turn this on.  Having stdlib problems, e.g.
        # examples/cartesian.
        c_type = get_c_type(arg_type, param=False)
        #c_type = get_c_type(arg_type, param=True)

        arg_name = arg.variable.name()

        # C++ has implicit 'this'
        if arg_name == 'self':
            continue

        self.decl_write('%s %s', c_type, arg_name)
        first = False
def visit_func_def(self, o: 'mypy.nodes.FuncDef') -> T:
    """Emit a C++ declaration or definition for a Python function/method.

    Behavior depends on the current pass:
      - forward_decl pass: only registers the method for virtual analysis
      - decl pass: writes the prototype and collects local variables
      - definition pass: writes the full body

    For a hard-coded list of functions with one trailing default argument,
    also emits an overload that omits that argument and forwards to the
    full version with the default value filled in.
    """
    # Skip these for now
    if o.name() == '__repr__':
        return

    # No function prototypes when forward declaring.
    if self.forward_decl:
        self.virtual.OnMethod(self.current_class_name, o.name())
        return

    # Hack to turn _Next() with keyword arg into a set of overloaded
    # methods
    #
    # Other things I tried:
    # if mylib.CPP: def _Next()  # MyPy doesn't like this
    # if not TYPE_CHECKING: def _Next()  # get UnboundType?
    # @overload decorator -- not sure how to do it, will probably cause
    # runtime overhead

    # Have:
    #   MakeOshParser(_Reader* line_reader, bool emit_comp_dummy)
    # Want:
    #   MakeOshParser(_Reader* line_reader) {
    #     return MakeOshParser(line_reader, True);
    #   }

    # TODO: restrict this
    class_name = self.current_class_name
    func_name = o.name()
    ret_type = o.type.ret_type

    # Hard-coded allow-list of (class, function) pairs that get the
    # default-argument overload treatment.
    if (class_name in ('BoolParser', 'CommandParser') and
        func_name == '_Next' or
        class_name == 'ParseContext' and func_name == 'MakeOshParser' or
        class_name == 'ErrorFormatter' and func_name == 'PrettyPrintError' or
        class_name is None and func_name == 'PrettyPrintError' or
        class_name == 'WordParser' and
        func_name in ('_ParseVarExpr', '_ReadVarOpArg2') or
        class_name == 'AbstractWordEvaluator' and
        func_name in ('EvalWordSequence2', '_EvalWordToParts',
                      '_EmptyStrOrError', '_EvalWordPart') or
        # virtual method in several classes
        func_name == 'EvalWordToString' or
        class_name == 'ArithEvaluator' and func_name == '_ValToIntOrError' or
        class_name == 'BoolEvaluator' and
        func_name in ('_EvalCompoundWord', '_StringToIntegerOrError') or
        class_name == 'CommandEvaluator' and
        func_name in ('_Execute', 'ExecuteAndCatch') or
        # core/executor.py
        class_name == 'ShellExecutor' and func_name == '_MakeProcess' or
        # osh/word_eval.py
        class_name is None and func_name == 'CheckCompatArray' or
        # core/state.py
        class_name is None and func_name == '_PackFlags' or
        class_name == 'Mem' and func_name in ('GetVar', 'SetVar', 'GetCell') or
        class_name == 'SearchPath' and func_name == 'Lookup' or
        # core/ui.py
        class_name == 'ErrorFormatter' and func_name == 'Print_' or
        # osh/sh_expr_eval.py
        class_name is None and func_name == 'EvalLhsAndLookup' or
        class_name == 'SplitContext' and
        func_name in ('SplitForWordEval', '_GetSplitter') or
        # qsn_/qsn.py
        class_name is None and
        func_name in ('maybe_encode', 'maybe_shell_encode') or
        # osh/builtin_assign.py
        class_name is None and func_name == '_PrintVariables' or
        # virtual function
        func_name == 'RunSimpleCommand' or
        # core/main_loop.py
        func_name == 'Batch'
       ):
        default_val = o.arguments[-1].initializer
        if default_val:  # e.g. osh/bool_parse.py has default val
            if self.decl or class_name is None:
                func_name = o.name()
            else:
                func_name = '%s::%s' % (self.current_class_name, o.name())
            self.write('\n')

            # Write _Next() with no args
            virtual = ''
            c_ret_type = get_c_type(ret_type)
            if isinstance(ret_type, TupleType):
                assert c_ret_type.endswith('*')
                c_ret_type = c_ret_type[:-1]

            self.decl_write_ind('%s%s %s(', virtual, c_ret_type, func_name)

            # TODO: Write all params except last optional one
            self._WriteFuncParams(o.type.arg_types[:-1], o.arguments[:-1])
            self.decl_write(')')
            if self.decl:
                self.decl_write(';\n')
            else:
                self.write(' {\n')
                # return MakeOshParser()
                kw = '' if isinstance(ret_type, NoneTyp) else 'return '
                self.write(' %s%s(' % (kw, o.name()))

                # Don't write self or last optional argument
                first_arg_index = 0 if class_name is None else 1
                pass_through = o.arguments[first_arg_index:-1]
                if pass_through:
                    for i, arg in enumerate(pass_through):
                        if i != 0:
                            self.write(', ')
                        self.write(arg.variable.name())
                    self.write(', ')

                # Now write default value, e.g. lex_mode_e::DBracket
                self.accept(default_val)
                self.write(');\n')
                self.write('}\n')

    virtual = ''
    if self.decl:
        self.local_var_list = []  # Make a new instance to collect from
        self.local_vars[o] = self.local_var_list

        #log('Is Virtual? %s %s', self.current_class_name, o.name())
        if self.virtual.IsVirtual(self.current_class_name, o.name()):
            virtual = 'virtual '

    if not self.decl and self.current_class_name:
        # definition looks like
        # void Type::foo(...);
        func_name = '%s::%s' % (self.current_class_name, o.name())
    else:
        # declaration inside class { }
        func_name = o.name()

    self.write('\n')

    # TODO: if self.current_class_name ==
    # write 'virtual' here.
    # You could also test NotImplementedError as abstract?

    c_ret_type = get_c_type(ret_type)
    if isinstance(ret_type, TupleType):
        assert c_ret_type.endswith('*')
        c_ret_type = c_ret_type[:-1]

    self.decl_write_ind('%s%s %s(', virtual, c_ret_type, func_name)

    self._WriteFuncParams(o.type.arg_types, o.arguments)

    if self.decl:
        self.decl_write(');\n')
        self.in_func_body = True
        self.accept(o.body)  # Collect member_vars, but don't write anything
        self.in_func_body = False
        return

    self.write(') ')

    # Write local vars we collected in the 'decl' phase
    if not self.forward_decl and not self.decl:
        arg_names = [arg.variable.name() for arg in o.arguments]
        no_args = [
            (lval_name, c_type) for (lval_name, c_type) in self.local_vars[o]
            if lval_name not in arg_names
        ]

        self.prepend_to_block = no_args

    self.in_func_body = True
    self.accept(o.body)
    self.in_func_body = False
def visit_overloaded_func_def(self, o: 'mypy.nodes.OverloadedFuncDef') -> T:
    """Overloaded function definitions are not translated to C++."""
    return None
def visit_class_def(self, o: 'mypy.nodes.ClassDef') -> T:
#log(' CLASS %s', o.name)
base_class_name = None # single inheritance only
for b in o.base_type_exprs:
if isinstance(b, NameExpr):
# TODO: inherit from std::exception?
if b.name != 'object' and b.name != 'Exception':
base_class_name = b.name
elif isinstance(b, MemberExpr): # vm._Executor -> vm::_Executor
assert isinstance(b.expr, NameExpr), b
base_class_name = '%s::%s' % (b.expr.name, b.name)
# Forward declare types because they may be used in prototypes
if self.forward_decl:
self.decl_write_ind('class %s;\n', o.name)
if base_class_name:
self.virtual.OnSubclass(base_class_name, o.name)
# Visit class body so we get method declarations
self.current_class_name = o.name
self._write_body(o.defs.body)
self.current_class_name = None
return
if self.decl:
self.member_vars.clear() # make a new list
self.decl_write('\n')
self.decl_write_ind('class %s', o.name) # block after this
# e.g. class TextOutput : public ColorOutput
self.decl_write(' : public %s', base_class_name or 'gc_heap::Obj')
self.decl_write(' {\n')
self.decl_write_ind(' public:\n')
# NOTE: declaration still has to traverse the whole body to fill out
# self.member_vars!!!
block = o.defs
self.indent += 1
self.current_class_name = o.name
for stmt | |
<reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
import iotbx.pdb
from scitbx.array_family import flex
import time
pdb_str="""
CRYST1 454.007 426.033 459.047 90.00 90.00 90.00 P 1
SCALE1 0.002203 0.000000 0.000000 0.00000
SCALE2 0.000000 0.002347 0.000000 0.00000
SCALE3 0.000000 0.000000 0.002178 0.00000
ATOM 1 CA VAL A1358 148.938 119.717 100.000 1.00 33.88 C
ATOM 2 CA PRO A1359 149.108 116.282 101.953 1.00 31.91 C
ATOM 3 CA PRO A1360 146.078 114.542 103.797 1.00 26.33 C
ATOM 4 CA PRO A1361 145.621 112.862 107.337 1.00 18.42 C
ATOM 5 CA THR A1362 145.081 109.243 108.683 1.00 21.37 C
ATOM 6 CA ASP A1363 143.651 106.972 111.575 1.00 20.53 C
ATOM 7 CA LEU A1364 140.301 108.604 112.732 1.00 13.10 C
ATOM 8 CA ARG A1365 138.856 107.558 116.217 1.00 13.90 C
ATOM 9 CA PHE A1366 136.295 108.604 118.951 1.00 12.59 C
ATOM 10 CA THR A1367 136.338 108.844 122.851 1.00 18.27 C
ATOM 11 CA ASN A1368 134.748 110.767 125.861 1.00 35.66 C
ATOM 12 CA ILE A1369 131.093 110.630 124.665 1.00 20.50 C
ATOM 13 CA GLY A1370 128.787 112.946 126.681 1.00 31.22 C
ATOM 14 CA PRO A1371 125.231 114.133 125.725 1.00 30.22 C
ATOM 15 CA ASP A1372 126.459 117.360 124.041 1.00 23.75 C
ATOM 16 CA THR A1373 130.112 116.268 123.662 1.00 17.75 C
ATOM 17 CA MET A1374 132.554 113.816 121.995 1.00 13.70 C
ATOM 18 CA ARG A1375 136.395 113.642 121.605 1.00 12.83 C
ATOM 19 CA VAL A1376 137.530 112.924 118.003 1.00 11.64 C
ATOM 20 CA THR A1377 141.175 111.755 117.305 1.00 13.55 C
ATOM 21 CA TRP A1378 143.436 111.275 114.163 1.00 10.25 C
ATOM 22 CA ALA A1379 147.077 110.870 112.883 1.00 20.66 C
ATOM 23 CA PRO A1380 148.832 113.776 110.968 1.00 25.69 C
ATOM 24 CA PRO A1381 150.776 113.842 107.607 1.00 34.10 C
ATOM 25 CA ASP A1385 154.007 122.439 109.035 1.00 55.68 C
ATOM 26 CA LEU A1386 150.309 122.713 108.083 1.00 39.19 C
ATOM 27 CA THR A1387 148.167 125.829 108.846 1.00 33.58 C
ATOM 28 CA ASN A1388 144.910 123.873 109.428 1.00 18.33 C
ATOM 29 CA PHE A1389 142.989 120.679 110.062 1.00 13.50 C
ATOM 30 CA LEU A1390 139.443 121.388 108.928 1.00 14.05 C
ATOM 31 CA VAL A1391 137.092 119.270 111.076 1.00 11.67 C
ATOM 32 CA ARG A1392 133.729 119.258 109.231 1.00 12.02 C
ATOM 33 CA TYR A1393 130.897 117.502 111.152 1.00 10.39 C
ATOM 34 CA SER A1394 127.111 117.090 110.574 1.00 16.06 C
ATOM 35 CA PRO A1395 124.173 114.979 111.936 1.00 17.95 C
ATOM 36 CA VAL A1396 123.808 111.698 109.904 1.00 24.54 C
ATOM 37 CA LYS A1397 120.134 112.689 109.250 1.00 44.54 C
ATOM 38 CA ASN A1398 121.139 116.103 107.784 1.00 43.24 C
ATOM 39 CA GLU A1399 124.609 115.685 106.151 1.00 54.32 C
ATOM 40 CA GLU A1400 124.190 119.276 104.701 1.00 42.39 C
ATOM 41 CA VAL A1402 128.032 120.991 108.262 1.00 34.73 C
ATOM 42 CA ALA A1403 129.573 122.671 111.271 1.00 31.08 C
ATOM 43 CA GLU A1404 133.269 123.451 110.546 1.00 18.73 C
ATOM 44 CA LEU A1405 136.125 123.710 113.086 1.00 23.21 C
ATOM 45 CA SER A1406 139.558 125.155 112.173 1.00 19.59 C
ATOM 46 CA ILE A1407 141.752 123.692 114.833 1.00 22.08 C
ATOM 47 CA SER A1408 145.018 126.033 115.249 1.00 33.50 C
ATOM 48 CA PRO A1409 148.634 124.336 115.685 1.00 35.29 C
ATOM 49 CA SER A1410 148.476 120.979 118.025 1.00 37.93 C
ATOM 50 CA ASP A1411 145.131 118.710 118.597 1.00 27.98 C
ATOM 51 CA ASN A1412 145.573 115.280 117.118 1.00 21.72 C
ATOM 52 CA ALA A1413 142.225 115.275 119.063 1.00 16.22 C
ATOM 53 CA VAL A1414 139.281 117.816 119.201 1.00 15.68 C
ATOM 54 CA VAL A1415 136.371 117.803 121.698 1.00 15.31 C
ATOM 55 CA LEU A1416 133.225 118.528 119.662 1.00 16.01 C
ATOM 56 CA THR A1417 130.718 120.452 121.897 1.00 20.05 C
ATOM 57 CA ASN A1418 127.070 121.739 121.572 1.00 32.11 C
ATOM 58 CA LEU A1419 125.988 118.404 119.986 1.00 15.97 C
ATOM 59 CA LEU A1420 122.395 117.086 119.982 1.00 22.66 C
ATOM 60 CA PRO A1421 121.812 114.423 122.733 1.00 20.32 C
ATOM 61 CA GLY A1422 121.263 110.823 121.509 1.00 23.77 C
ATOM 62 CA THR A1423 122.224 111.850 117.885 1.00 18.46 C
ATOM 63 CA GLU A1424 124.604 110.218 115.318 1.00 14.18 C
ATOM 64 CA TYR A1425 127.176 112.502 113.569 1.00 13.02 C
ATOM 65 CA VAL A1426 129.513 112.297 110.551 1.00 12.30 C
ATOM 66 CA VAL A1427 132.997 113.789 111.143 1.00 12.16 C
ATOM 67 CA SER A1428 135.627 114.576 108.470 1.00 11.24 C
ATOM 68 CA VAL A1429 139.197 116.027 108.576 1.00 11.54 C
ATOM 69 CA SER A1430 141.130 117.805 105.734 1.00 13.82 C
ATOM 70 CA SER A1431 144.703 119.108 106.173 1.00 18.85 C
ATOM 71 CA VAL A1432 145.115 122.769 105.027 1.00 24.56 C
ATOM 72 CA TYR A1433 148.261 124.777 104.098 1.00 45.81 C
ATOM 73 CA HIS A1436 144.719 125.120 100.347 1.00 55.81 C
ATOM 74 CA GLU A1437 142.703 122.002 101.363 1.00 29.48 C
ATOM 75 CA SER A1438 143.992 118.417 101.042 1.00 22.89 C
ATOM 76 CA THR A1439 141.620 115.400 100.644 1.00 28.41 C
ATOM 77 CA PRO A1440 139.399 114.638 103.739 1.00 17.14 C
ATOM 78 CA LEU A1441 139.506 111.600 106.101 1.00 14.44 C
ATOM 79 CA ARG A1442 135.936 110.522 107.346 1.00 14.35 C
ATOM 80 CA GLY A1443 134.058 108.636 110.191 1.00 15.43 C
ATOM 81 CA ARG A1444 130.819 108.445 112.368 1.00 11.66 C
ATOM 82 CA GLN A1445 129.772 108.415 116.114 1.00 11.50 C
ATOM 83 CA LYS A1446 126.623 108.803 118.415 1.00 16.19 C
ATOM 84 CA THR A1447 126.219 111.062 121.551 1.00 17.99 C
ATOM 85 CA GLY A1448 124.870 110.091 125.040 1.00 29.95 C
ATOM 86 CA LEU A1449 121.592 110.926 126.887 1.00 22.66 C
ATOM 87 CA ASP A1450 121.190 114.131 128.936 1.00 29.34 C
ATOM 88 CA SER A1451 119.589 114.286 132.415 1.00 22.82 C
ATOM 89 CA PRO A1452 116.259 115.773 133.432 1.00 20.71 C
ATOM 90 CA THR A1453 116.918 119.260 134.966 1.00 21.30 C
ATOM 91 CA GLY A1454 115.025 121.690 137.311 1.00 40.30 C
ATOM 92 CA ILE A1455 113.515 119.461 140.065 1.00 15.59 C
ATOM 93 CA ASP A1456 110.666 121.305 141.864 1.00 20.21 C
ATOM 94 CA PHE A1457 108.039 120.443 144.565 1.00 13.19 C
ATOM 95 CA SER A1458 104.364 121.636 144.401 1.00 31.40 C
ATOM 96 CA ASP A1459 100.972 120.385 145.865 1.00 30.41 C
ATOM 97 CA ILE A1460 102.658 120.059 149.305 1.00 24.94 C
ATOM 98 CA THR A1461 100.000 118.617 151.677 1.00 48.83 C
ATOM 99 CA ASN A1463 100.070 113.301 152.758 1.00 22.51 C
ATOM 100 CA SER A1464 101.256 114.201 149.190 1.00 18.68 C
ATOM 101 CA PHE A1465 103.502 116.463 147.133 1.00 13.23 C
ATOM 102 CA THR A1466 103.878 116.832 143.319 1.00 15.46 C
ATOM 103 CA VAL A1467 107.394 116.566 141.859 1.00 13.69 C
ATOM 104 CA HIS A1468 108.182 118.418 138.594 1.00 14.58 C
ATOM 105 CA TRP A1469 111.247 118.176 136.299 1.00 10.82 C
ATOM 106 CA ILE A1470 112.365 119.716 132.973 1.00 19.51 C
ATOM 107 CA ALA A1471 112.613 117.148 130.133 1.00 23.06 C
ATOM 108 CA PRO A1472 116.007 116.133 128.576 1.00 26.02 C
ATOM 109 CA ARG A1473 116.707 117.205 124.930 1.00 24.80 C
ATOM 110 CA ALA A1474 117.405 113.548 123.992 1.00 30.93 C
ATOM 111 CA THR A1475 114.556 111.456 122.577 1.00 27.61 C
ATOM 112 CA ILE A1476 113.479 109.292 125.565 1.00 20.47 C
ATOM 113 CA THR A1477 111.104 106.367 126.245 1.00 20.70 C
ATOM 114 CA GLY A1478 110.236 107.515 129.830 1.00 17.71 C
ATOM 115 CA TYR A1479 111.339 108.336 133.412 1.00 11.13 C
ATOM 116 CA ARG A1480 112.030 106.377 136.642 1.00 11.29 C
ATOM 117 CA ILE A1481 111.354 108.176 139.957 1.00 11.59 C
ATOM 118 CA ARG A1482 112.549 106.852 143.378 1.00 11.91 C
ATOM 119 CA HIS A1483 111.280 108.193 146.740 1.00 12.39 C
ATOM 120 CA HIS A1484 111.933 107.485 150.478 1.00 13.07 C
ATOM 121 CA PRO A1485 111.962 109.317 153.882 1.00 16.85 C
ATOM 122 CA GLU A1486 115.450 110.852 154.527 1.00 35.55 C
ATOM 123 CA HIS A1487 116.161 108.541 157.533 1.00 37.44 C
ATOM 124 CA PHE A1488 114.775 105.425 155.653 1.00 55.77 C
ATOM 125 CA GLY A1490 114.925 100.391 153.173 1.00 51.28 | |
# -*- coding: utf-8 -*-
"""
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import unicode_literals, print_function
import os
import util
import spotipy.util
import pygit2
import spotipy
class SpotifyUser(object):
"""
A class that creates a Spotify user "self"
"""
def __init__(self, username, client_id, client_secret, redirect_uri):
    """
    A class that creates a Spotify user "self"

    :param username: Name of Spotify user
    :type username: string
    :param client_id: ID given by Spotify to user
    :type client_id: string
    :param client_secret: Confirmation to use program
    :type client_secret: string
    :param redirect_uri: Another Confirmation to use program
    :type redirect_uri: string
    :raises: RuntimeError if no API token could be obtained
    """
    self.username = username
    # add the scope for things we need, can change over time if we need
    # less
    scope = "playlist-read-private "
    scope += "playlist-read-collaborative "
    scope += "playlist-modify-public "
    scope += "playlist-modify-private "
    # directory that the gitfiles will be stored in
    self.git_dir = ".activePlaylists/"
    # NOTE(review): placeholder signatures -- need code to get the real
    # author (and committer; committer could be the local git user)
    self.author = pygit2.Signature("spotify username", "spotify email")
    self.comitter = pygit2.Signature("spotify username", "spotify email")
    # gets the token from the spotify api, can not do anything without this
    self.token = spotipy.util.prompt_for_user_token(
        username, client_id=client_id,
        client_secret=client_secret,
        redirect_uri=redirect_uri,
        scope=scope
    )
    # error out if we don't have a token
    # BUGFIX: use identity comparison with None ('is None'), not '== None'
    if self.token is None:
        raise RuntimeError("Cannot get token from " + username)
    # use the token to create a new spotify session
    self.sp = spotipy.Spotify(auth=self.token)
    # get the current spotify playlists
    self.playlists = self.sp.user_playlists(username)['items']
def get_playlist_ids(self):
"""Funtion to get playlist ids of user
:param self: Spotify user
:returns: list -- list of ids in following format ''{"pid": foo, "uid": bar}''
"""
ids = []
for playlist in self.playlists:
ids.append({"pid": playlist["id"], "uid": playlist["owner"]["id"]})
return ids
def get_playlist_id(self, position):
"""Function to get a single playlist's ID
:param position: specifies what playlist to get
:type position: string
:returns: ID in following format '{"pid": foo, "uid": bar}'
"""
position = int(position)
return {"pid": self.playlists[position]["id"], "uid": self.playlists[position]["owner"]["id"]}
def get_playlist_names(self):
"""Function to get all playlist names
:returns: list -- of playlist names
"""
names = []
for playlist in self.playlists:
names.append(playlist["name"])
return names
def get_playlist_from_id(self, pid):
"""Function that returns playlist name from pid
:param pid: playlist ID
:type pid: string
:returns: playlist name
"""
return self.sp.user_playlist(self.username, pid, fields="name")
def get_playlist_name(self, position):
"""Function that returns playlist name from position
:param position: specifies what playlist to get name from
:type position: string
:returns: playlist name
"""
position = int(position)
return self.playlists[position]["name"]
def get_playlist_tracks(self, uid, pid):
"""Function to get tracks from pid
:param uid: user ID
:type uid: string
:param pid: playlist ID
:type pid: string
:returns: list -- tracks on playlist corresponding to pid
"""
playlistInfo = self.sp.user_playlist(uid, pid)["tracks"]["items"]
return playlistInfo
def init_git_playlist(self, uid, pid):
    """Clone a playlist into a new local git repository.

    Writes the playlist's track IDs to ``<git_dir>/<uid>/<pid>/index.txt``,
    initializes a repo there, and creates two commits (empty master, then
    the index file).

    :param uid: user ID
    :type uid: string
    :param pid: playlist ID to initialize
    :type pid: string
    :raises: RuntimeError if this playlist was already cloned
    """
    playlist_path = os.path.join(uid, pid)
    # gets the track list IDs
    trackList = self.get_playlist_tracks(uid, pid)
    # make sure that the directories exist, if not create them
    os.makedirs(self.git_dir, exist_ok=True)
    os.makedirs(self.git_dir + playlist_path, exist_ok=True)

    # an existing index.txt means this playlist was cloned before
    if os.path.isfile(self.git_dir + playlist_path + "/index.txt"):
        raise RuntimeError("Tried to clone playlist when one of the " +
                           "same playlist has been cloned already.")

    # one track ID per line; tracks without an ID are local files
    with open(self.git_dir + playlist_path + "/index.txt", "w") as f:
        for track in trackList:
            if track["track"]["id"] != None:  # ignore local files
                print(track["track"]["id"], file=f)

    # create repo and build tree
    new_repo = pygit2.init_repository(self.git_dir + playlist_path)
    new_tree = new_repo.TreeBuilder().write()
    # initial empty commit so the index commit has a parent
    first_commit = new_repo.create_commit(
        "HEAD", self.author, self.comitter, "Created master", new_tree, [])
    # create blob for the index file
    file_blob = new_repo.create_blob_fromdisk(
        os.path.join(self.git_dir, playlist_path, 'index.txt'))
    # build tree again
    new_tree = new_repo.TreeBuilder()
    # add our new index file (preserve the on-disk file mode)
    new_tree.insert("index.txt", file_blob,
                    os.stat(self.git_dir + playlist_path + "/index.txt").st_mode)
    # build tree again
    tree = new_tree.write()
    # add the index file to the repo
    new_repo.index.read()
    new_repo.index.add("index.txt")
    new_repo.index.write()
    # commit the file
    new_repo.create_commit(
        "HEAD", self.author, self.comitter, "Added index.txt", tree,
        [first_commit])
def add_song_to_playlist(self, uid, pid, songid):
    """Append a song ID to a cloned playlist's index file.

    The ID is appended to ``index.txt`` and a fresh (uncommitted) tree
    containing the updated file is written; committing is done
    separately by commit_changes_to_playlist().

    :param uid: user ID
    :type uid: string
    :param pid: playlist ID to add song to
    :type pid: string
    :param songid: ID of song to add
    :type songid: string
    :raises: RuntimeError if the song is already present
    """
    playlist_path = os.path.join(uid, pid)
    util.check_if_git_playlist(self.git_dir, playlist_path)
    index_path = self.git_dir + playlist_path + "/index.txt"
    with open(index_path, "r+") as f:
        existing_ids = [line.strip() for line in f.readlines()]
        if songid in existing_ids:
            raise RuntimeError("Song is already in playlist")
        # file pointer is now at EOF, so this appends
        print(songid, file=f)
    # open the repo and write a tree holding the updated index
    repo = pygit2.Repository(self.git_dir + playlist_path)
    file_blob = repo.create_blob_fromdisk(index_path)
    tree_builder = repo.TreeBuilder()
    tree_builder.insert("index.txt", file_blob,
                        os.stat(index_path).st_mode)
    tree_builder.write()
def remove_song_from_playlist(self, uid, pid, songid):
    """Remove a song ID from a cloned playlist's index file.

    Rewrites ``index.txt`` without the given song and writes a fresh
    (uncommitted) tree containing the updated file; committing is done
    separately by commit_changes_to_playlist().

    :param uid: user ID
    :type uid: string
    :param pid: playlist ID to remove song from
    :type pid: string
    :param songid: ID of song to remove
    :type songid: string
    :raises: RuntimeError if the playlist does not contain the song
    """
    # os.path.join for consistency with the other playlist helpers
    playlist_path = os.path.join(uid, pid)
    util.check_if_git_playlist(self.git_dir, playlist_path)
    with open(self.git_dir + playlist_path + "/index.txt", "r+") as f:
        songIds = []
        found_song = False
        for line in f.readlines():
            line = line.strip()
            if songid == line:
                found_song = True
            else:
                songIds.append(line)
        # fixed: `found_song == False` -> idiomatic `not found_song`
        if not found_song:
            raise RuntimeError("playlist does not have song.")
        # go to the start of the text file and rewrite the surviving IDs
        f.seek(0)
        for ID in songIds:
            print(ID, file=f)
        # drop any leftover bytes from the old, longer content
        f.truncate()
    repo = pygit2.Repository(self.git_dir + playlist_path)
    # create the file blob and write a tree holding the updated index
    file_blob = repo.create_blob_fromdisk(
        self.git_dir + playlist_path + "/index.txt")
    new_tree = repo.TreeBuilder()
    new_tree.insert("index.txt", file_blob,
                    os.stat(self.git_dir + playlist_path + "/index.txt").st_mode)
    new_tree.write()
def commit_changes_to_playlist(self, uid, pid):
    """Function to commit changes to playlist.

    Builds a tree from the current ``index.txt``, stages the file and
    records a commit on HEAD whose parent is the current branch tip.

    :param uid: user ID
    :type uid: string
    :param pid: playlist ID
    :type pid: string
    """
    playlist_path = uid + "/" + pid
    util.check_if_git_playlist(self.git_dir, playlist_path)
    # get the repo
    repo = pygit2.Repository(self.git_dir + playlist_path)
    # create the file blob from the on-disk index file
    file_blob = repo.create_blob_fromdisk(
        self.git_dir + playlist_path + "/index.txt")
    new_tree = repo.TreeBuilder()
    # insert it into the tree, preserving the file's mode bits
    new_tree.insert("index.txt", file_blob,
                    os.stat(self.git_dir + playlist_path + "/index.txt").st_mode)
    tree = new_tree.write()
    # stage index.txt
    repo.index.read()
    repo.index.add("index.txt")
    repo.index.write()
    # commit changes to playlist on top of the current branch tip
    repo.create_commit("HEAD", self.author, self.comitter,
                       "Changes committed to " + playlist_path, tree, [repo.head.target])
def pull_spotify_playlist(self, uid, pid):
    """Pull playlist changes from Spotify into the local clone.

    Merges remote track IDs into the local ``index.txt``: any remote
    track not present locally is appended (local order takes
    precedence; positions of new remote tracks are not preserved), and
    the result is committed when anything changed.

    :param uid: user ID
    :type uid: string
    :param pid: playlist ID
    :type pid: string
    :returns: string -- status of the pull (successful or not)
    """
    playlist_path = uid + "/" + pid
    util.check_if_git_playlist(self.git_dir, playlist_path)
    # grab tracks from spotify from pid
    results = self.sp.user_playlist_tracks(self.username, pid)
    results = results["items"]
    # get just a list of the track ids from the response
    remote_tracks = []
    for track in results:
        # fixed: identity comparison `!= None` -> `is not None`
        if track["track"]["id"] is not None:  # only take spotify tracks
            remote_tracks.append(track["track"]["id"])
    # get local track ids
    with open(self.git_dir + playlist_path + "/index.txt") as f:
        local_tracks = f.read().splitlines()
    # merge tracks by adding if not added already; local takes precedence
    # and positions of new remote tracks are not preserved
    diff = False
    for remoteTrack in remote_tracks:
        if remoteTrack not in local_tracks:
            local_tracks.append(remoteTrack)
            diff = True
    # write tracks back to file
    with open(self.git_dir + playlist_path + "/index.txt", "w") as f:
        for track in local_tracks:
            print(track, file=f)
    # commit playlist changes if needed
    if diff:
        self.commit_changes_to_playlist(uid, pid)
        return 'Added and committed changes from remote.'
    return 'No changes committed, up to date with remote.'
def push_spotify_playlist(self, uid, pid):
"""Function to push playlist to Spotify
:param uid: user ID
:type uid: string
:param pid: playlist ID
:type pid: string
:returns: string -- stating status of pull (either successfull or not)
"""
playlist_path = uid + "/" + pid
util.check_if_git_playlist(self.git_dir, playlist_path)
# grab tracks from spotify from pid
results = self.sp.user_playlist_tracks(self.username, pid)
results = results["items"]
# get just a list of the track ids from the response
remote_tracks = []
for track in results:
if track["track"]["id"] | |
<filename>TNSAgent/tns/campus_agent.py
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now, format_timestamp)
from helpers import *
from measurement_type import MeasurementType
from measurement_unit import MeasurementUnit
from meter_point import MeterPoint
from market import Market
from market_state import MarketState
from neighbor import Neighbor
from local_asset import LocalAsset
from local_asset_model import LocalAssetModel
from myTransactiveNode import myTransactiveNode
from neighbor_model import NeighborModel
from temperature_forecast_model import TemperatureForecastModel
from solar_pv_resource import SolarPvResource
from solar_pv_resource_model import SolarPvResourceModel
from openloop_pnnl_load_predictor import OpenLoopPnnlLoadPredictor
from vertex import Vertex
from timer import Timer
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '0.1'
class CampusAgent(Agent, myTransactiveNode):
def __init__(self, config_path, **kwargs):
    """Read the agent configuration and initialize topics and timers.

    :param config_path: path to the agent's config file
    :param kwargs: forwarded to the volttron Agent base class
    """
    # BUG FIX: the module only does `import datetime`, so the bare
    # `datetime.now()` / `timedelta(...)` calls below relied on a star
    # import (e.g. `from helpers import *`) re-exporting those names.
    # Bind them locally so this method works regardless.
    from datetime import datetime, timedelta

    Agent.__init__(self, **kwargs)
    myTransactiveNode.__init__(self)

    self.config_path = config_path
    self.config = utils.load_config(config_path)
    self.name = self.config.get('name')
    self.market_cycle_in_min = int(self.config.get('market_cycle_in_min', 60))
    self.duality_gap_threshold = float(self.config.get('duality_gap_threshold', 0.01))
    self.building_names = self.config.get('buildings', [])
    self.building_powers = self.config.get('building_powers')
    self.db_topic = self.config.get("db_topic", "tnc")
    self.PV_max_kW = float(self.config.get("PV_max_kW"))
    self.city_loss_factor = float(self.config.get("city_loss_factor"))
    self.demand_threshold_coef = float(self.config.get('demand_threshold_coef'))
    self.monthly_peak_power = float(self.config.get('monthly_peak_power'))
    self.neighbors = []
    # Pub/sub topics; some are templates later filled with a node name.
    self.city_supply_topic = "{}/city/campus/supply".format(self.db_topic)
    self.building_demand_topic = "/".join([self.db_topic, "{}/campus/demand"])
    self.campus_demand_topic = "{}/campus/city/demand".format(self.db_topic)
    self.campus_supply_topic = "/".join([self.db_topic, "campus/{}/supply"])
    self.solar_topic = "/".join([self.db_topic, "campus/pv"])
    self.system_loss_topic = "{}/{}/system_loss".format(self.db_topic, self.name)
    self.dc_threshold_topic = "{}/{}/dc_threshold_topic".format(self.db_topic, self.name)
    self.price_topic = "{}/{}/marginal_prices".format(self.db_topic, self.name)
    self.reschedule_interval = timedelta(minutes=10, seconds=1)
    self.simulation = self.config.get('simulation', False)
    self.simulation_start_time = parser.parse(self.config.get('simulation_start_time'))
    self.simulation_one_hour_in_seconds = int(self.config.get('simulation_one_hour_in_seconds'))
    # Global simulation clock settings shared via the Timer class.
    Timer.created_time = datetime.now()
    Timer.simulation = self.simulation
    Timer.sim_start_time = self.simulation_start_time
    Timer.sim_one_hr_in_sec = self.simulation_one_hour_in_seconds
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
    """Create market objects and wire up pubsub subscriptions on start."""
    # Add other objects: assets, services, neighbors
    self.init_objects()
    # Subscribe to the city's supply signal and each building's demand signal
    subscribe = self.vip.pubsub.subscribe
    subscribe(peer='pubsub',
              prefix=self.city_supply_topic,
              callback=self.new_supply_signal)
    for building in self.building_names:
        subscribe(peer='pubsub',
                  prefix=self.building_demand_topic.format(building),
                  callback=self.new_demand_signal)
def new_demand_signal(self, peer, sender, bus, topic, headers, message):
    """Handle a demand record published by one of the campus buildings."""
    _log.debug("At {}, {} receives new demand records: {}".format(Timer.get_cur_time(),
                                                                  self.name, message))
    building_name = message['source']
    demand_curves = message['curves']
    start_of_cycle = message['start_of_cycle']
    fail_to_converged = message['fail_to_converged']
    matches = [n for n in self.neighbors if n.name == building_name]
    if len(matches) != 1:
        # Zero or multiple neighbors share this name: report and bail out.
        _log.error("{}: There are {} building(s) with name {}."
                   .format(self.name, len(matches), building_name))
        _log.error("Neighbors are: {}".format([x.name for x in self.neighbors]))
        _log.error("Message is: {}".format(message))
        _log.error("Check value of 'name' key in the config file for building {}.".format(building_name))
        return
    neighbor = matches[0]
    neighbor.model.receive_transactive_signal(self, demand_curves)
    self.balance_market(1, start_of_cycle, fail_to_converged, neighbor)
def new_supply_signal(self, peer, sender, bus, topic, headers, message):
    """Handle a supply record published by the city node."""
    _log.debug("At {}, {} receives new supply records: {}".format(Timer.get_cur_time(),
                                                                  self.name, message))
    source = message['source']
    supply_curves = message['curves']
    start_of_cycle = message['start_of_cycle']
    fail_to_converged = message['fail_to_converged']
    # Forward the curves to the city neighbor model, then rebalance if
    # this marks the start of a market cycle.
    self.city.model.receive_transactive_signal(self, supply_curves)
    if start_of_cycle:
        self.balance_market(1, start_of_cycle, fail_to_converged)
def balance_market(self, run_cnt, start_of_cycle=False, fail_to_converged=False, fail_to_converged_neighbor=None):
    """Run one iteration of TNS market balancing and publish prices.

    :param run_cnt: iteration counter used to limit reschedules
    :param start_of_cycle: True at the beginning of a market cycle
    :param fail_to_converged: convergence flag from the triggering signal
    :param fail_to_converged_neighbor: neighbor that failed to converge, if any
    """
    # BUG FIX: bind the datetime class locally — the module only does
    # `import datetime`, so the bare `datetime.now()` below relied on a
    # star import re-exporting the class name.
    from datetime import datetime

    market = self.markets[0]  # Assume only 1 TNS market per node
    market.signal_new_data = True
    market.balance(self)  # Assume only 1 TNS market per node
    if market.converged:
        _log.debug("TNS market {} balanced successfully.".format(market.name))
        # Sum all the powers as will be needed by the net supply/demand curve.
        market.assign_system_vertices(self)
        # Send only if either of the 2 conditions below occurs:
        # 1) Model balancing did not converge
        # 2) A new cycle (ie. begin of hour)
        for n in self.neighbors:
            # If the neighbor failed to converge (eg., building1 failed to converge)
            if n == fail_to_converged_neighbor and n is not None:
                n.model.prep_transactive_signal(market, self)
                topic = self.campus_demand_topic
                if n != self.city:
                    topic = self.campus_supply_topic.format(n.name)
                n.model.send_transactive_signal(self, topic, start_of_cycle)
                _log.debug("NeighborModel {} sent records.".format(n.model.name))
            else:
                # Always send signal downstream at the start of a new cyle
                if start_of_cycle:
                    if n != self.city:
                        n.model.prep_transactive_signal(market, self)
                        topic = self.campus_supply_topic.format(n.name)
                        n.model.send_transactive_signal(self, topic, start_of_cycle)
                        _log.debug("NeighborModel {} sent records.".format(n.model.name))
                else:
                    _log.debug("Not start of cycle. Check convergence for neighbor {}.".format(n.model.name))
                    n.model.check_for_convergence(market)
                    if not n.model.converged:
                        n.model.prep_transactive_signal(market, self)
                        topic = self.campus_demand_topic
                        if n != self.city:
                            topic = self.campus_supply_topic.format(n.name)
                        n.model.send_transactive_signal(self, topic, start_of_cycle)
                        _log.debug("NeighborModel {} sent records.".format(n.model.name))
                    else:
                        _log.debug("{} ({}) did not send records due to check_for_convergence()."
                                   .format(n.model.name, self.name))
        # Schedule rerun balancing if not in simulation mode
        if not self.simulation:
            # For start_of_cyle=True, the code above always send signal to neighbors so don't need to reschedule
            # Schedule rerun if any neighbor is not converged
            if not start_of_cycle:
                if not all([n.model.converged for n in self.neighbors]):
                    dt = datetime.now()
                    # Schedule to rerun after 5 minutes if it is in the same hour and is the first reschedule
                    next_run_dt = dt + self.reschedule_interval
                    if dt.hour == next_run_dt.hour and run_cnt >= 1:
                        _log.debug("{} reschedule to run at {}".format(self.name, next_run_dt))
                        self.core.schedule(next_run_dt, self.balance_market, run_cnt + 1)
        prices = market.marginalPrices
        # There is a case where the balancing happens at the end of the hour and continues to the next hour, which
        # creates 26 values. Get the last 25 values.
        prices = prices[-25:]
        prices = [x.value for x in prices]
        self.vip.pubsub.publish(peer='pubsub',
                                topic=self.price_topic,
                                message={'prices': prices,
                                         'current_time': format_timestamp(Timer.get_cur_time())
                                         }
                                )
    else:
        _log.debug("Market balancing sub-problem failed.")
        self.city.model.prep_transactive_signal(market, self)
        self.city.model.send_transactive_signal(self, self.campus_demand_topic, start_of_cycle)
def init_objects(self):
# Add meter
meter = MeterPoint()
meter.measurementType = MeasurementType.PowerReal
meter.name = 'CampusElectricityMeter'
meter.measurementUnit = MeasurementUnit.kWh
self.meterPoints.append(meter)
# Add weather forecast service
weather_service = TemperatureForecastModel(self.config_path, self)
self.informationServiceModels.append(weather_service)
# Add inelastive asset
inelastive_load = LocalAsset()
inelastive_load.name = 'InelasticBuildings' # Campus buildings that are not responsive
inelastive_load.maximumPower = 0 # Remember that a load is a negative power [kW]
inelastive_load.minimumPower = -2 * 8200 # Assume twice the average PNNL load [kW]
# Add inelastive asset model
inelastive_load_model = OpenLoopPnnlLoadPredictor(weather_service)
inelastive_load_model.name = 'InelasticBuildingsModel'
inelastive_load_model.engagementCost = [0, 0, 0] # Transition costs irrelevant
inelastive_load_model.defaultPower = -6000 # [kW]
inelastive_load_model.defaultVertices = [Vertex(0, 0, -6000.0, 1)]
# Cross-reference asset & asset model
inelastive_load_model.object = inelastive_load
inelastive_load.model = inelastive_load_model
# Add solar PV asset
solar_pv = SolarPvResource()
solar_pv.maximumPower = self.PV_max_kW # [avg.kW]
solar_pv.minimumPower = 0.0 # [avg.kW]
solar_pv.name = 'SolarPv'
solar_pv.description = '120 kW solar PV site on the campus'
# Add solar PV asset model
solar_pv_model = SolarPvResourceModel()
solar_pv_model.cloudFactor = 1.0 # dimensionless
solar_pv_model.engagementCost = [0, 0, 0]
solar_pv_model.name = 'SolarPvModel'
solar_pv_model.defaultPower = 0.0 # [avg.kW]
solar_pv_model.defaultVertices = [Vertex(0, 0, 30.0, True)]
solar_pv_model.costParameters = | |
#
# Jasy - Web Tooling Framework
# Copyright 2013-2014 <NAME>
#
import re
import copy
import jasy.core.Console as Console
import jasy.script.api.Comment as Comment
# Operator and punctuator mapping from token to tree node type name.
# NB: because the lexer doesn't backtrack, all token prefixes must themselves
# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
# tokens != and !).
operatorNames = {
    '<'   : 'lt',
    '>'   : 'gt',
    '<='  : 'le',
    '>='  : 'ge',
    '!='  : 'ne',
    '=='  : 'eq',

    '!'   : 'not',
    '+'   : 'plus',
    '*'   : 'mul',
    '-'   : 'minus',
    '/'   : 'div',
    '%'   : 'mod',

    '$'   : 'dollar',
    '^'   : 'carat',
    '|'   : 'pipe',

    ','   : 'comma',
    ';'   : 'semicolon',
    ':'   : 'colon',
    '='   : 'assign',

    '&'   : 'ampersand',
    '~'   : 'tilde',
    '@'   : 'at',
    '?'   : 'questionmark',

    '&&'  : 'and',
    '||'  : 'or',

    ')'   : 'right_paren',
    '('   : 'left_paren',
    '['   : 'left_bracket',
    ']'   : 'right_bracket',
    '{'   : 'left_curly',
    '}'   : 'right_curly'
}

# Assignment operators: operators that may be directly followed by "=" to
# form a compound assignment token (e.g. "+=", "?=") in lexOp().
assignOperators = ["+", "-", "*", "/", "%", "?"]
#
# Classes
#
class Token:
    """Mutable token record filled in by the Tokenizer.

    Slots: type (node-type name), start/end (source offsets), line,
    assignOp (operator name for compound assignments), value (literal
    value), unit (numeric unit such as px or %), quote (string delimiter).
    """
    # __slots__ keeps per-token memory low; tokens are created in bulk.
    __slots__ = ["type", "start", "line", "assignOp", "end", "value", "unit", "quote"]
class TokenizerError(Exception):
    """Raised when the tokenizer encounters malformed input."""

    def __init__(self, message, fileId, line):
        # Keep the formatted message plus the location for later display.
        self.message = "Tokenization Error: %s" % message
        self.fileId = fileId
        self.line = line
        super().__init__(self.message)

    def __str__(self):
        return "{} in {} at {}".format(self.message, self.fileId, self.line)
class Tokenizer(object):
def __init__(self, source, fileId="", line=1):
# source: JavaScript source
# fileId: Filename (for debugging proposes)
# line: Line number (for debugging proposes)
self.cursor = 0
self.source = str(source)
self.tokens = {}
self.tokenIndex = 0
self.lookahead = 0
self.scanNewlines = False
self.fileId = fileId
self.line = line
self.comments = []
# Remaining (not yet scanned) input, computed from the current cursor.
input_ = property(lambda self: self.source[self.cursor:])
# Token at the current ring-buffer index; None before the first get().
token = property(lambda self: self.tokens.get(self.tokenIndex))
def done(self):
    """Return True when only end-of-file remains."""
    # Peek in operand position: the very first token might be a regexp.
    return "end" == self.peek(True)
def match(self, tokenType, scanOperand=False):
    """Consume the next token if its type matches.

    Returns True on a match; otherwise the token is pushed back and
    the (falsy) result of unget() is returned.
    """
    if self.get(scanOperand) == tokenType:
        return True
    return self.unget()
def mustMatch(self, tokenType):
    """Consume a token of the given type or raise TokenizerError."""
    matched = self.match(tokenType)
    if matched:
        return self.token
    raise TokenizerError("Missing " + tokenType, self.fileId, self.line)
def find(self, anyOf):
    """Look ahead for the first token whose type is in *anyOf*.

    The tokenizer position is restored before returning.  Returns the
    matching token type, or None when end-of-file is reached first.

    BUG FIX: the original loop had no exit condition, so the trailing
    `return None` was unreachable and the scan could spin forever when
    none of the wanted types occurred; it now stops at "end".
    """
    point = self.save()
    while True:
        tokenType = self.get()
        if tokenType in anyOf:
            self.rewind(point)
            return tokenType
        if tokenType == "end":
            break
    self.rewind(point)
    return None
def peek(self, scanOperand=False):
    """Return the type of the next token without consuming it."""
    if self.lookahead:
        # A token was already ungotten: fetch it from the 4-slot token
        # ring buffer (hence the "& 3" mask) instead of rescanning.
        next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
        if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
            # In newline-sensitive mode a token on another line reads as "newline".
            tokenType = "newline"
        else:
            tokenType = getattr(next, "type", None)
    else:
        # No buffered token: scan one, then push it back.
        tokenType = self.get(scanOperand)
        self.unget()
    return tokenType
def peekOnSameLine(self, scanOperand=False):
    """Like peek(), but report "newline" when the token starts a new line."""
    # Temporarily flip the newline-sensitive flag around the peek.
    self.scanNewlines = True
    result = self.peek(scanOperand)
    self.scanNewlines = False
    return result
def getComments(self):
if self.comments:
comments = self.comments
self.comments = []
return comments
return None
def skip(self):
    """Eats comments and whitespace.

    Advances self.cursor past spaces, tabs, newlines and comments.
    Each comment is recorded in self.comments with a mode classifying
    its placement ("inline", "section" or "block"), and the flags
    skippedSpaces / skippedComments / skippedLineBreaks record which
    kinds of trivia were consumed.
    """
    input = self.source
    startLine = self.line
    # Whether this is the first call, as happens when starting to parse a
    # file (eat leading comments/white space).
    # Fixed: `self.cursor is 0` used identity comparison on an int.
    startOfFile = self.cursor == 0
    indent = ""
    self.skippedSpaces = False
    self.skippedComments = False
    self.skippedLineBreaks = False
    while (True):
        if len(input) > self.cursor:
            ch = input[self.cursor]
        else:
            break
        self.cursor += 1
        if len(input) > self.cursor:
            next = input[self.cursor]
        else:
            next = None
        if ch == "\n" and not self.scanNewlines:
            self.line += 1
            indent = ""
            self.skippedLineBreaks = True
        elif ch == "/" and next == "*":
            # Multi-line comment: /* ... */
            self.cursor += 1
            self.skippedComments = True
            text = "/*"
            commentStartLine = self.line
            if startLine == self.line and not startOfFile:
                mode = "inline"
            elif (self.line - 1) > startLine:
                # distance before this comment means it is a comment block for a whole section (multiple lines of code)
                mode = "section"
            else:
                # comment for maybe multiple following lines of code, but not that important (no visual white space divider)
                mode = "block"
            while (True):
                try:
                    ch = input[self.cursor]
                    self.cursor += 1
                except IndexError:
                    raise TokenizerError("Unterminated comment", self.fileId, self.line)
                if ch == "*":
                    next = input[self.cursor]
                    if next == "/":
                        text += "*/"
                        self.cursor += 1
                        break
                elif ch == "\n":
                    self.line += 1
                text += ch
            # Filter escaping on slash-star combinations in comment text
            text = text.replace("*\/", "*/")
            try:
                self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
            except Comment.CommentException as commentError:
                Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
        elif ch == "/" and next == "/":
            # Single-line comment: // ...
            self.cursor += 1
            self.skippedComments = True
            text = "//"
            if startLine == self.line and not startOfFile:
                mode = "inline"
            elif (self.line - 1) > startLine:
                # distance before this comment means it is a comment block for a whole section (multiple lines of code)
                mode = "section"
            else:
                # comment for maybe multiple following lines of code, but not that important (no visual white space divider)
                mode = "block"
            while (True):
                try:
                    ch = input[self.cursor]
                    self.cursor += 1
                except IndexError:
                    # end of file etc.
                    break
                if ch == "\n":
                    self.line += 1
                    break
                text += ch
            try:
                self.comments.append(Comment.Comment(text, mode, self.line - 1, "", self.fileId))
            # BUG FIX: the handler dropped the `as commentError` binding but
            # still referenced commentError in the log call, raising a
            # NameError whenever a single-line comment was rejected.
            except Comment.CommentException as commentError:
                Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
        # check for whitespace, also for special cases like 0xA0
        elif ch in "\xA0 \t":
            self.skippedSpaces = True
            indent += ch
        else:
            self.cursor -= 1
            break
def lexZeroNumber(self, ch):
    """Scan a numeric literal that starts with "0".

    Handles "0", "0.123" (float value) and hex "0x..." (value kept as
    the raw string); a plain "0" yields the int 0.  A trailing unit
    (px, %, ...) is attached to the token when present.
    """
    token = self.token
    input = self.source
    token.type = "number"
    ch = input[self.cursor]
    self.cursor += 1
    if ch == ".":
        # Fraction: consume digits, then back up past the terminator.
        while(True):
            ch = input[self.cursor]
            self.cursor += 1
            if not (ch >= "0" and ch <= "9"):
                break
        self.cursor -= 1
        token.value = float(input[token.start:self.cursor])
    elif ch == "x" or ch == "X":
        # Hex literal: consume hex digits; value stays a raw string.
        while(True):
            ch = input[self.cursor]
            self.cursor += 1
            if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
                break
        self.cursor -= 1
        token.value = input[token.start:self.cursor]
    else:
        # Just "0": undo the lookahead consume.
        self.cursor -= 1
        token.value = 0
    unit = self.lexUnit()
    if unit:
        token.unit = unit
def lexNumber(self, ch):
    """Scan a decimal numeric literal (integer or float).

    Consumes digits and at most one decimal point; the token value
    becomes a float when a point was seen, an int otherwise.  A
    trailing unit (px, %, ...) is attached when present.
    NOTE(review): exponent notation (e.g. 1e3) is not consumed here
    despite the comment below — confirm how callers handle it.
    """
    token = self.token
    input = self.source
    token.type = "number"
    floating = False
    while(True):
        ch = input[self.cursor]
        self.cursor += 1
        if ch == "." and not floating:
            # First decimal point: accept it and look at the next char.
            floating = True
            ch = input[self.cursor]
            self.cursor += 1
        if not (ch >= "0" and ch <= "9"):
            break
    # Back up past the first non-numeric character.
    self.cursor -= 1
    segment = input[token.start:self.cursor]
    # Protect float or exponent numbers
    if floating:
        token.value = float(segment)
    else:
        token.value = int(segment)
    unit = self.lexUnit()
    if unit:
        token.unit = unit
def lexUnit(self):
"""Parses units like %, cm, inch, px, etc."""
start = self.cursor
input = self.source
while(True):
ch = input[self.cursor]
self.cursor += 1
if not ((ch >= "a" and ch <= "z") or ch == "%"):
break
self.cursor -= 1
segment = input[start:self.cursor]
return segment
def lexDot(self, ch):
    """Scan a token that starts with ".".

    ".5"-style fractions become number tokens (with optional unit);
    a bare "." becomes a "dot" token.
    """
    token = self.token
    input = self.source
    next = input[self.cursor]
    if next >= "0" and next <= "9":
        # Fraction: consume digits, then back up past the terminator.
        while (True):
            ch = input[self.cursor]
            self.cursor += 1
            if not (ch >= "0" and ch <= "9"):
                break
        self.cursor -= 1
        token.type = "number"
        token.value = float(input[token.start:self.cursor])
        unit = self.lexUnit()
        if unit:
            token.unit = unit
    else:
        token.type = "dot"
def lexString(self, ch):
    """Scan a string literal delimited by *ch* (' or ").

    token.value is the raw text between the delimiters — escape
    sequences are NOT decoded here (note the hasEscapes flag is set
    but never consumed; presumably decoding happens downstream —
    TODO confirm).  token.quote records the delimiter character.
    """
    token = self.token
    input = self.source
    token.type = "string"
    hasEscapes = False
    delim = ch
    ch = input[self.cursor]
    length = len(input)
    self.cursor += 1
    while ch != delim:
        if ch == "\\":
            # Skip the escaped character so an escaped delimiter does
            # not terminate the string.
            hasEscapes = True
            self.cursor += 1
        if self.cursor >= length:
            raise TokenizerError("Missing end quote for string!", self.fileId, self.line)
        ch = input[self.cursor]
        self.cursor += 1
    token.value = str(input[token.start + 1:self.cursor - 1])
    token.quote = input[token.start]
def lexOp(self, ch):
    """Scan an operator/punctuator token starting with *ch*.

    Greedily extends the operator while the longer prefix is still a
    known token, then checks for a trailing "=" that turns it into a
    compound assignment.
    """
    token = self.token
    source = self.source
    op = ch
    # Greedy extension: "!" -> "!=", "&" -> "&&", ...
    while True:
        try:
            following = source[self.cursor]
        except IndexError:
            break
        if (op + following) not in operatorNames:
            break
        self.cursor += 1
        op += following
    try:
        following = source[self.cursor]
    except IndexError:
        following = None
    if following == "=" and op in assignOperators:
        # Compound assignment such as "+=" or "*=".
        self.cursor += 1
        token.type = "assign"
        token.assignOp = operatorNames[op]
        op += "="
    elif op in operatorNames:
        token.type = operatorNames[op]
        token.assignOp = None
    else:
        raise TokenizerError("Unknown operator: %s!" % op, self.fileId, self.line)
def lexIdent(self, ch):
token = self.token
input = self.source
# Variables/Commands should support packaged/namespaced names e.g. "foo.bar"
isVariable = input[token.start] == "$"
isCommand = input[token.start] == "@"
isHex = input[token.start] == "#"
# Support variable blocks e.g. ${foo}
inVariableBlock = False
| |
{'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'required_nsg_rules': {'key': 'properties.requiredNsgRules', 'type': 'str'},
}
def __init__(
    self,
    *,
    location: str,
    managed_resource_group_id: str,
    tags: Optional[Dict[str, str]] = None,
    sku: Optional["Sku"] = None,
    parameters: Optional["WorkspaceCustomParameters"] = None,
    ui_definition_uri: Optional[str] = None,
    authorizations: Optional[List["WorkspaceProviderAuthorization"]] = None,
    created_by: Optional["CreatedBy"] = None,
    updated_by: Optional["CreatedBy"] = None,
    storage_account_identity: Optional["ManagedIdentityConfiguration"] = None,
    encryption: Optional["WorkspacePropertiesEncryption"] = None,
    public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
    required_nsg_rules: Optional[Union[str, "RequiredNsgRules"]] = None,
    **kwargs
):
    """Initialize a Workspace model from keyword arguments."""
    super(Workspace, self).__init__(tags=tags, location=location, **kwargs)
    # Caller-supplied settings.
    self.sku = sku
    self.managed_resource_group_id = managed_resource_group_id
    self.parameters = parameters
    self.ui_definition_uri = ui_definition_uri
    self.authorizations = authorizations
    self.created_by = created_by
    self.updated_by = updated_by
    self.storage_account_identity = storage_account_identity
    self.encryption = encryption
    self.public_network_access = public_network_access
    self.required_nsg_rules = required_nsg_rules
    # Read-only properties populated by the service.
    self.system_data = None
    self.provisioning_state = None
    self.created_date_time = None
    self.workspace_id = None
    self.workspace_url = None
    self.private_endpoint_connections = None
class WorkspaceCustomBooleanParameter(msrest.serialization.Model):
    """A boolean custom-parameter value.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar type: The type of variable that this is. Possible values include:
     "Bool", "Object", "String".
    :vartype type: str or ~azure_databricks_management_client.models.CustomParameterType
    :param value: Required. The value which should be used for this field.
    :type value: bool
    """

    _validation = {
        'type': {'readonly': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'value': {'key': 'value', 'type': 'bool'},
    }

    def __init__(self, *, value: bool, **kwargs):
        super(WorkspaceCustomBooleanParameter, self).__init__(**kwargs)
        # "type" is read-only and filled in by the service.
        self.type = None
        self.value = value
class WorkspaceCustomObjectParameter(msrest.serialization.Model):
    """An object-valued custom-parameter value.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar type: The type of variable that this is. Possible values include:
     "Bool", "Object", "String".
    :vartype type: str or ~azure_databricks_management_client.models.CustomParameterType
    :param value: Required. The value which should be used for this field.
    :type value: any
    """

    _validation = {
        'type': {'readonly': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'value': {'key': 'value', 'type': 'object'},
    }

    def __init__(self, *, value: Any, **kwargs):
        super(WorkspaceCustomObjectParameter, self).__init__(**kwargs)
        # "type" is read-only and filled in by the service.
        self.type = None
        self.value = value
class WorkspaceCustomParameters(msrest.serialization.Model):
    """Custom Parameters used for Cluster Creation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param aml_workspace_id: The ID of a Azure Machine Learning workspace to link with Databricks
     workspace.
    :type aml_workspace_id:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param custom_virtual_network_id: The ID of a Virtual Network where this Databricks Cluster
     should be created.
    :type custom_virtual_network_id:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param custom_public_subnet_name: The name of a Public Subnet within the Virtual Network.
    :type custom_public_subnet_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param custom_private_subnet_name: The name of the Private Subnet within the Virtual Network.
    :type custom_private_subnet_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param enable_no_public_ip: Should the Public IP be Disabled?.
    :type enable_no_public_ip:
     ~azure_databricks_management_client.models.WorkspaceCustomBooleanParameter
    :param load_balancer_backend_pool_name: Name of the outbound Load Balancer Backend Pool for
     Secure Cluster Connectivity (No Public IP).
    :type load_balancer_backend_pool_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param load_balancer_id: Resource URI of Outbound Load balancer for Secure Cluster Connectivity
     (No Public IP) workspace.
    :type load_balancer_id:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param nat_gateway_name: Name of the NAT gateway for Secure Cluster Connectivity (No Public IP)
     workspace subnets.
    :type nat_gateway_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param public_ip_name: Name of the Public IP for No Public IP workspace with managed vNet.
    :type public_ip_name: ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param prepare_encryption: Prepare the workspace for encryption. Enables the Managed Identity
     for managed storage account.
    :type prepare_encryption:
     ~azure_databricks_management_client.models.WorkspaceCustomBooleanParameter
    :param encryption: Contains the encryption details for Customer-Managed Key (CMK) enabled
     workspace.
    :type encryption: ~azure_databricks_management_client.models.WorkspaceEncryptionParameter
    :param require_infrastructure_encryption: A boolean indicating whether or not the DBFS root
     file system will be enabled with secondary layer of encryption with platform managed keys for
     data at rest.
    :type require_infrastructure_encryption:
     ~azure_databricks_management_client.models.WorkspaceCustomBooleanParameter
    :param storage_account_name: Default DBFS storage account name.
    :type storage_account_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param storage_account_sku_name: Storage account SKU name, ex: Standard_GRS, Standard_LRS.
     Refer https://aka.ms/storageskus for valid inputs.
    :type storage_account_sku_name:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :param vnet_address_prefix: Address prefix for Managed virtual network. Default value for this
     input is 10.139.
    :type vnet_address_prefix:
     ~azure_databricks_management_client.models.WorkspaceCustomStringParameter
    :ivar resource_tags: Tags applied to resources under Managed resource group. These can be
     updated by updating tags at workspace level.
    :vartype resource_tags:
     ~azure_databricks_management_client.models.WorkspaceCustomObjectParameter
    """

    # Only 'resource_tags' is server-populated; everything else is optional input.
    _validation = {
        "resource_tags": {"readonly": True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        "aml_workspace_id": {"key": "amlWorkspaceId", "type": "WorkspaceCustomStringParameter"},
        "custom_virtual_network_id": {"key": "customVirtualNetworkId", "type": "WorkspaceCustomStringParameter"},
        "custom_public_subnet_name": {"key": "customPublicSubnetName", "type": "WorkspaceCustomStringParameter"},
        "custom_private_subnet_name": {"key": "customPrivateSubnetName", "type": "WorkspaceCustomStringParameter"},
        "enable_no_public_ip": {"key": "enableNoPublicIp", "type": "WorkspaceCustomBooleanParameter"},
        "load_balancer_backend_pool_name": {"key": "loadBalancerBackendPoolName", "type": "WorkspaceCustomStringParameter"},
        "load_balancer_id": {"key": "loadBalancerId", "type": "WorkspaceCustomStringParameter"},
        "nat_gateway_name": {"key": "natGatewayName", "type": "WorkspaceCustomStringParameter"},
        "public_ip_name": {"key": "publicIpName", "type": "WorkspaceCustomStringParameter"},
        "prepare_encryption": {"key": "prepareEncryption", "type": "WorkspaceCustomBooleanParameter"},
        "encryption": {"key": "encryption", "type": "WorkspaceEncryptionParameter"},
        "require_infrastructure_encryption": {"key": "requireInfrastructureEncryption", "type": "WorkspaceCustomBooleanParameter"},
        "storage_account_name": {"key": "storageAccountName", "type": "WorkspaceCustomStringParameter"},
        "storage_account_sku_name": {"key": "storageAccountSkuName", "type": "WorkspaceCustomStringParameter"},
        "vnet_address_prefix": {"key": "vnetAddressPrefix", "type": "WorkspaceCustomStringParameter"},
        "resource_tags": {"key": "resourceTags", "type": "WorkspaceCustomObjectParameter"},
    }

    def __init__(
        self,
        *,
        aml_workspace_id: Optional["WorkspaceCustomStringParameter"] = None,
        custom_virtual_network_id: Optional["WorkspaceCustomStringParameter"] = None,
        custom_public_subnet_name: Optional["WorkspaceCustomStringParameter"] = None,
        custom_private_subnet_name: Optional["WorkspaceCustomStringParameter"] = None,
        enable_no_public_ip: Optional["WorkspaceCustomBooleanParameter"] = None,
        load_balancer_backend_pool_name: Optional["WorkspaceCustomStringParameter"] = None,
        load_balancer_id: Optional["WorkspaceCustomStringParameter"] = None,
        nat_gateway_name: Optional["WorkspaceCustomStringParameter"] = None,
        public_ip_name: Optional["WorkspaceCustomStringParameter"] = None,
        prepare_encryption: Optional["WorkspaceCustomBooleanParameter"] = None,
        encryption: Optional["WorkspaceEncryptionParameter"] = None,
        require_infrastructure_encryption: Optional["WorkspaceCustomBooleanParameter"] = None,
        storage_account_name: Optional["WorkspaceCustomStringParameter"] = None,
        storage_account_sku_name: Optional["WorkspaceCustomStringParameter"] = None,
        vnet_address_prefix: Optional["WorkspaceCustomStringParameter"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Networking-related parameters.
        self.custom_virtual_network_id = custom_virtual_network_id
        self.custom_public_subnet_name = custom_public_subnet_name
        self.custom_private_subnet_name = custom_private_subnet_name
        self.enable_no_public_ip = enable_no_public_ip
        self.load_balancer_backend_pool_name = load_balancer_backend_pool_name
        self.load_balancer_id = load_balancer_id
        self.nat_gateway_name = nat_gateway_name
        self.public_ip_name = public_ip_name
        self.vnet_address_prefix = vnet_address_prefix
        # Encryption-related parameters.
        self.prepare_encryption = prepare_encryption
        self.encryption = encryption
        self.require_infrastructure_encryption = require_infrastructure_encryption
        # Storage and integration parameters.
        self.aml_workspace_id = aml_workspace_id
        self.storage_account_name = storage_account_name
        self.storage_account_sku_name = storage_account_sku_name
        self.resource_tags = None  # read-only; filled in by the server
class WorkspaceCustomStringParameter(msrest.serialization.Model):
    """The Value.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar type: The type of variable that this is. Possible values include: "Bool", "Object",
     "String".
    :vartype type: str or ~azure_databricks_management_client.models.CustomParameterType
    :param value: Required. The value which should be used for this field.
    :type value: str
    """

    # 'type' is populated by the service; 'value' must always be supplied.
    _validation = {
        "type": {"readonly": True},
        "value": {"required": True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "value": {"key": "value", "type": "str"},
    }

    def __init__(self, *, value: str, **kwargs):
        """
        :keyword value: Required. The value which should be used for this field.
        :paramtype value: str
        """
        super().__init__(**kwargs)
        self.type = None  # read-only; filled in by the server on responses
        self.value = value
class WorkspaceEncryptionParameter(msrest.serialization.Model):
    """The object that contains details of encryption used on the workspace.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The type of variable that this is. Possible values include: "Bool", "Object",
     "String".
    :vartype type: str or ~azure_databricks_management_client.models.CustomParameterType
    :param value: The value which should be used for this field.
    :type value: ~azure_databricks_management_client.models.Encryption
    """

    # 'type' is populated by the service; 'value' is optional here.
    _validation = {
        "type": {"readonly": True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "value": {"key": "value", "type": "Encryption"},
    }

    def __init__(self, *, value: Optional["Encryption"] = None, **kwargs):
        """
        :keyword value: The value which should be used for this field.
        :paramtype value: ~azure_databricks_management_client.models.Encryption
        """
        super().__init__(**kwargs)
        self.type = None  # read-only; filled in by the server on responses
        self.value = value
class WorkspaceListResult(msrest.serialization.Model):
    """List of workspaces.

    :param value: The array of workspaces.
    :type value: list[~azure_databricks_management_client.models.Workspace]
    :param next_link: The URL to use for getting the next set of results.
    :type next_link: str
    """

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        "value": {"key": "value", "type": "[Workspace]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["Workspace"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: The array of workspaces.
        :paramtype value: list[~azure_databricks_management_client.models.Workspace]
        :keyword next_link: The URL to use for getting the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link  # pagination cursor; None on the last page
class WorkspacePropertiesEncryption(msrest.serialization.Model):
    """Encryption properties for databricks workspace.

    All required parameters must be populated in order to send to Azure.

    :param entities: Required. Encryption entities definition for the workspace.
    :type entities: ~azure_databricks_management_client.models.EncryptionEntitiesDefinition
    """

    # 'entities' is mandatory on requests.
    _validation = {
        "entities": {"required": True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        "entities": {"key": "entities", "type": "EncryptionEntitiesDefinition"},
    }

    def __init__(self, *, entities: "EncryptionEntitiesDefinition", **kwargs):
        """
        :keyword entities: Required. Encryption entities definition for the workspace.
        :paramtype entities: ~azure_databricks_management_client.models.EncryptionEntitiesDefinition
        """
        super().__init__(**kwargs)
        self.entities = entities
class WorkspaceProviderAuthorization(msrest.serialization.Model):
"""The workspace provider authorization.
All required parameters must be populated in order to send to Azure.
:param principal_id: Required. The provider's principal identifier. This is the identity that
the provider will use to call ARM to manage the workspace resources.
:type principal_id: str
:param role_definition_id: Required. The provider's role definition identifier. This role will
define all the permissions that the provider must have on the workspace's container resource
group. This role definition cannot have permission to delete | |
and what services they run).
Args:
keyname: A str that indicates the name of the SSH keypair that
uniquely identifies this AppScale deployment.
Returns:
A str that indicates where the locations.json file can be found.
"""
return cls.LOCAL_APPSCALE_PATH + "locations-" + keyname + ".json"
@classmethod
def cleanup_keyname(cls, keyname):
    """Cleans up all the files starting with the keyname upon termination
    of cloud instances.

    Args:
      keyname: A str that indicates the name of the SSH keypair that
        uniquely identifies this AppScale deployment.
    """
    # Every artifact for this deployment lives under LOCAL_APPSCALE_PATH
    # and is prefixed with the keyname.
    pattern = cls.LOCAL_APPSCALE_PATH + keyname + "*"
    for stale_file in glob.iglob(pattern):
        os.remove(stale_file)
@classmethod
def update_local_metadata(cls, options, db_master, head_node):
    """Writes a locations.json file to the local filesystem,
    that the tools can use to locate machines in an AppScale deployment.

    Args:
      options: A Namespace that indicates deployment-specific parameters not
        relating to the placement strategy in use.
      db_master: A str representing the location of the database master.
      head_node: A str representing the location we can reach an
        AppController at.
    """
    # NOTE(review): db_master is accepted but never used in this body —
    # confirm whether callers still need to pass it.
    # find out every machine's IP address and what they're doing
    acc = AppControllerClient(head_node, cls.get_secret_key(options.keyname))
    role_info = acc.get_role_info()
    # Default to 'xen' (bare-metal/virtualized) when no cloud
    # infrastructure was specified.
    infrastructure = options.infrastructure or 'xen'
    # write our yaml metadata file
    appscalefile_contents = {
        'infrastructure' : infrastructure,
        'group' : options.group,
    }
    # Cloud deployments carry extra, provider-specific credentials.
    if infrastructure != 'xen':
        appscalefile_contents['zone'] = options.zone
    if infrastructure == 'gce':
        appscalefile_contents['project'] = options.project
    elif infrastructure in ['ec2', 'euca']:
        appscalefile_contents['EC2_ACCESS_KEY'] = options.EC2_ACCESS_KEY
        appscalefile_contents['EC2_SECRET_KEY'] = options.EC2_SECRET_KEY
        appscalefile_contents['EC2_URL'] = options.EC2_URL
    elif infrastructure == 'azure':
        appscalefile_contents['azure_subscription_id'] = options.azure_subscription_id
        appscalefile_contents['azure_app_id'] = options.azure_app_id
        appscalefile_contents['azure_app_secret_key'] = options.azure_app_secret_key
        appscalefile_contents['azure_tenant_id'] = options.azure_tenant_id
        appscalefile_contents['azure_resource_group'] = options.azure_resource_group
        appscalefile_contents['azure_storage_account'] = options.azure_storage_account
        appscalefile_contents['azure_group_tag'] = options.azure_group_tag
    # Combined metadata: live role assignments plus the deployment options
    # they were started with.
    locations_json = {
        'node_info': role_info,
        'infrastructure_info': appscalefile_contents
    }
    # and now we can write the json metadata file
    with open(cls.get_locations_json_location(options.keyname), 'w') \
      as file_handle:
        file_handle.write(json.dumps(locations_json))
@classmethod
def clean_local_metadata(cls, keyname):
    """Takes the existing JSON-encoded metadata on disk and assigns all nodes
    besides load_balancers (because of public ips) to "open".

    Args:
      keyname: A str that represents an SSH keypair name, uniquely identifying
        this AppScale deployment.
    Raises:
      BadConfigurationException: If there is no JSON-encoded metadata file
        named after the given keyname.
    """
    try:
        # 'r+' so the same handle can be read and then rewritten in place.
        with open(cls.get_locations_json_location(keyname), 'r+') as file_handle:
            # yaml.safe_load parses JSON too (JSON is a subset of YAML).
            file_contents = yaml.safe_load(file_handle.read())
            # Compatibility support for previous versions of locations file.
            if isinstance(file_contents, list):
                cls.upgrade_json_file(keyname)
                # NOTE(review): upgrade_json_file rewrites the file through a
                # separate handle; this re-read assumes the new on-disk
                # contents are visible through this open handle — confirm.
                file_handle.seek(0)
                file_contents = json.loads(file_handle.read())
            # Reset every node that is not a load balancer to the 'open'
            # role; load balancers keep theirs because of their public IPs.
            cleaned_nodes = []
            for node in file_contents.get('node_info'):
                if 'load_balancer' not in cls.get_node_roles(node):
                    node['roles'] = ['open']
                cleaned_nodes.append(node)
            file_contents['node_info'] = cleaned_nodes
            # Now we write the JSON file after our changes.
            file_handle.seek(0)
            file_handle.truncate()
            file_handle.write(json.dumps(file_contents))
    except IOError:
        raise BadConfigurationException("Couldn't read from locations file.")
@classmethod
def get_infrastructure_option(cls, tag, keyname):
    """Reads the JSON-encoded metadata on disk and returns the value for
    the key 'tag' from the dictionary retrieved using the key
    'infrastructure_info'.

    Args:
      keyname: A str that indicates the name of the SSH keypair that
        uniquely identifies this AppScale deployment.
      tag: A str that indicates what we should look for in the
        infrastructure_info dictionary, this tag retrieves an option that was
        passed to AppScale at runtime.

    Returns:
      The value stored under 'tag', or None if the tag is absent.

    Raises:
      BadConfigurationException: If the locations file cannot be read.
    """
    try:
        with open(cls.get_locations_json_location(keyname), 'r') as file_handle:
            # yaml.safe_load parses JSON too (JSON is a subset of YAML).
            file_contents = yaml.safe_load(file_handle.read())
            # Old-format files stored a bare list; upgrade to the current
            # {'node_info': ..., 'infrastructure_info': ...} layout first.
            if isinstance(file_contents, list):
                cls.upgrade_json_file(keyname)
                # NOTE(review): re-reading through this handle after
                # upgrade_json_file rewrote the file via another handle —
                # confirm the refreshed contents are what gets read here.
                file_handle.seek(0)
                file_contents = yaml.safe_load(file_handle.read())
            return file_contents.get('infrastructure_info', {}).get(tag)
    except IOError:
        raise BadConfigurationException("Couldn't read from locations file, "
                                        "AppScale may not be running with "
                                        "keyname {0}".format(keyname))
@classmethod
def get_local_nodes_info(cls, keyname):
    """Reads the JSON-encoded metadata on disk and returns a list using the
    key 'node_info' that indicates which machines run each API service in
    this AppScale deployment.

    Args:
      keyname: A str that represents an SSH keypair name, uniquely identifying
        this AppScale deployment.
    Returns:
      A list of dicts, where each dict contains information on a single machine
      in this AppScale deployment.
    Raises:
      BadConfigurationException: If there is no JSON-encoded metadata file
        named after the given keyname.
    """
    try:
        with open(cls.get_locations_json_location(keyname), 'r') as file_handle:
            file_contents = json.loads(file_handle.read())
            # Old-format files stored a bare list of nodes; upgrade to the
            # current dict layout before extracting 'node_info'.
            if isinstance(file_contents, list):
                cls.upgrade_json_file(keyname)
                # NOTE(review): re-reading through this handle after
                # upgrade_json_file rewrote the file via another handle —
                # confirm the refreshed contents are what gets read here.
                file_handle.seek(0)
                file_contents = json.loads(file_handle.read())
            return file_contents.get('node_info', [])
    except IOError:
        raise BadConfigurationException("Couldn't read from locations file, "
                                        "AppScale may not be running with "
                                        "keyname {0}".format(keyname))
@classmethod
def upgrade_json_file(cls, keyname):
    """Upgrades the JSON file from the other version where it is a list by
    reading the JSON file, reading the YAML file, creating a dictionary in
    the "new" format and writing that to the JSON file, and then removing the
    YAML file.

    Args:
      keyname: A str that represents an SSH keypair name, uniquely identifying
        this AppScale deployment.
    Raises:
      BadConfigurationException: If there is no JSON-encoded metadata file,
        or there is no YAML-encoded metadata file, or the JSON file couldn't be
        written to.
    """
    try:
        # Open, read, and store the JSON metadata.
        with open(cls.get_locations_json_location(keyname), 'r') as file_handle:
            role_info = json.loads(file_handle.read())
        # If this method is running, there should be a YAML metadata file.
        yaml_locations = "{0}locations-{1}.yaml".format(cls.LOCAL_APPSCALE_PATH,
                                                        keyname)
        # Open, read, and store the YAML metadata.
        with open(yaml_locations, 'r') as yaml_handle:
            locations_yaml_contents = yaml.safe_load(yaml_handle.read())
        # Create a dictionary with the information from both the YAML and JSON
        # metadata.
        locations_json = {
            'node_info': role_info,
            'infrastructure_info': locations_yaml_contents
        }
        # Write the new format to the JSON metadata file.
        with open(cls.get_locations_json_location(keyname), 'w') as file_handle:
            file_handle.write(json.dumps(locations_json))
        # Remove the YAML file because all information from it should be in the
        # JSON file now. At this point any failures would have raised the
        # Exception.
        if os.path.exists(yaml_locations):
            os.remove(yaml_locations)
    except IOError:
        raise BadConfigurationException("Couldn't upgrade locations json "
                                        "file, AppScale may not be running with"
                                        " keyname {0}".format(keyname))
@classmethod
def get_host_for_role(cls, keyname, role):
    """ Gets the ip of the host the given role runs on.

    Args:
      keyname: The SSH keypair name that uniquely identifies this AppScale
        deployment.
      role: A str, the role we are looking up the host for.

    Returns:
      The public IP of the first node running the role, or None if no node
      runs it.
    """
    # Lazily scan the node list and stop at the first match.
    return next((node["public_ip"]
                 for node in cls.get_local_nodes_info(keyname)
                 if role in cls.get_node_roles(node)), None)
@classmethod
def are_disks_used(cls, keyname):
    """Queries the locations.json file to see if any persistent disks are being
    used in this AppScale deployment.

    Args:
      keyname: The SSH keypair name that uniquely identifies this AppScale
        deployment.
    Returns:
      True if any persistent disks are used, and False otherwise.
    """
    # A node uses a disk iff its "disk" entry is present and truthy.
    return any(node.get("disk")
               for node in cls.get_local_nodes_info(keyname))
@classmethod
def encrypt_password(cls, username, password):
    """Salts the given password with the provided username and hashes it.

    Args:
      username: A str representing the username whose password we wish to
        encrypt.
      password: A str representing the password to encrypt.
    Returns:
      The hex digest of the SHA1 hash of the username-salted password.
    """
    # hashlib requires bytes under Python 3; the original passed a str,
    # which raises TypeError there. Encode explicitly so this works on
    # both Python 2 and Python 3 (identical digests for ASCII input).
    salted = (username + password).encode("utf-8")
    # NOTE(review): SHA1 is a weak password hash; kept for compatibility
    # with digests already stored by existing deployments — consider
    # migrating to a salted KDF such as hashlib.pbkdf2_hmac.
    return hashlib.sha1(salted).hexdigest()
@classmethod
def get_node_roles(cls, node):
    """ Method to get the roles of the specified node and convert 'jobs' key
    to 'roles' if needed.

    Args:
      node: A dict describing one machine; mutated in place if it still
        uses the legacy 'jobs' key.
    Returns:
      The list of roles stored under the node's 'roles' key.
    """
    # Legacy metadata stored roles under 'jobs'; migrate the key in place.
    if 'jobs' in node:
        node['roles'] = node.pop('jobs')
    return node['roles']
@classmethod
def get_host_with_role(cls, keyname, role):
    """Searches through the local metadata to see which virtual machine runs the
    specified role.

    Args:
      keyname: The SSH keypair name that uniquely identifies this AppScale
        deployment.
      role: A str indicating the role to search for.
    Returns:
      A str containing the host that runs the specified service.
    Raises:
      AppScaleException: If no node runs the given role.
    """
    # Lazily scan the node list; stop at the first node with the role.
    match = next((node for node in cls.get_local_nodes_info(keyname)
                  if role in cls.get_node_roles(node)), None)
    if match is None:
        raise AppScaleException("Couldn't find a {0} node.".format(role))
    return match['public_ip']
@classmethod
def get_all_public_ips(cls, keyname):
    """Searches through the local metadata to get all of the public IPs or FQDNs
    for machines in this AppScale deployment.

    Args:
      keyname: The SSH keypair name that uniquely identifies this AppScale
        deployment.
    Returns:
      A list containing all the public IPs or FQDNs in this AppScale deployment.
    """
    return [machine['public_ip']
            for machine in cls.get_local_nodes_info(keyname)]
@classmethod
def get_credentials(cls, is_admin=True):
    """Queries the user for the username and password that should be set for the
    cloud administrator's account in this AppScale deployment.

    Args:
      is_admin: A bool that indicates if we should be prompting the user for an
        admin username/password or not.
    Returns:
      A tuple containing the username and password that the user typed in.
    """
    return (cls.get_username_from_stdin(is_admin),
            cls.get_password_from_stdin())
@classmethod
def get_username_from_stdin(cls, is_admin):
"""Asks the user for the name of the e-mail address that should be made an
administrator on their AppScale cloud or App Engine application.
Returns:
A str containing the e-mail address the user typed in.
"""
while True:
if | |
#!/usr/bin/python
# Based on previous work done by <NAME>, <NAME>
# (Communications of the ACM 30(7), 1987) and <NAME>
# https://github.com/postdataproject/skas-archived/blob/devel/skas/phonmet/syll/grapheme2syllable.py
#
# Presyllabification and syllabification rules are taken from
# <NAME>'s 'El Diccionario Electrónico Fonético del Español'
# https://www.raco.cat/index.php/Elies/article/view/194843
# http://elies.rediris.es/elies4/Fon2.htm
# http://elies.rediris.es/elies4/Fon8.htm
import re
from itertools import product
from spacy.tokens import Doc
from .pipeline import load_pipeline
from .rhymes import STRUCTURES_LENGTH
from .rhymes import analyze_rhyme
from .syllabification import ALTERNATIVE_SYLLABIFICATION
from .syllabification import CONSONANT_CLUSTER_RE
from .syllabification import CONSONANT_GROUP
from .syllabification import CONSONANT_GROUP_EXCEPTION_DL
from .syllabification import CONSONANT_GROUP_EXCEPTION_LL
from .syllabification import HIATUS_FIRST_VOWEL_RE
from .syllabification import LIAISON_FIRST_PART
from .syllabification import LIAISON_SECOND_PART
from .syllabification import LOWERING_DIPHTHONGS_WITH_H
from .syllabification import POSSESSIVE_PRON_UNSTRESSED
from .syllabification import PREFIX_DES_WITH_CONSONANT_RE
from .syllabification import PREFIX_SIN_WITH_CONSONANT_RE
from .syllabification import RAISING_DIPHTHONGS_WITH_H
from .syllabification import SPACE
from .syllabification import STRESSED_PRON
from .syllabification import STRESSED_UNACCENTED_MONOSYLLABLES
from .syllabification import STRONG_VOWELS
from .syllabification import SYLLABIFICATOR_FOREIGN_WORDS_DICT
from .syllabification import UNSTRESSED_FORMS
from .syllabification import UNSTRESSED_UNACCENTED_MONOSYLLABLES
from .syllabification import W_VOWEL_GROUP
from .syllabification import WEAK_VOWELS
from .syllabification import accents_re
from .syllabification import letter_clusters_re
from .syllabification import paroxytone_re
def have_prosodic_liaison(first_syllable, second_syllable):
    """Checks for prosodic liaison between two syllables

    :param first_syllable: Dictionary with key syllable (str) and is_stressed (bool) representing
        the first syllable
    :param second_syllable: Dictionary with key syllable (str) and is_stressed (bool)
        representing the second syllable
    :return: `True` if there is prosodic liaison and `False` otherwise
    :rtype: bool
    """
    second = second_syllable['syllable']
    # A leading 'y' directly followed by a vowel acts as a consonant,
    # which blocks liaison with the previous syllable.
    if second[0].lower() == 'y' and len(second) > 1 \
            and second[1].lower() in set('aeiouáéíúó'):
        return False
    return (first_syllable['syllable'][-1] in LIAISON_FIRST_PART
            and second[0] in LIAISON_SECOND_PART)
def get_syllables_word_end(words):
    """Get a list of syllables from a list of words extracting word boundaries

    :param words: List of dictonaries of syllables for each word in a line
    :return: List of dictionaries of syllables with an extra is_word_end key
        on each word-final syllable
    :rtype: list
    """
    flattened = []
    for entry in words:
        # Punctuation entries carry a "symbol" key and produce no syllables.
        if "symbol" in entry:
            continue
        word_syllables = entry["word"]
        if word_syllables:
            # Mark only the final syllable of the word (mutated in place).
            word_syllables[-1]["is_word_end"] = True
        flattened.extend(word_syllables)
    return flattened
def get_phonological_groups(word_syllables, liaison_type="synalepha",
                            breakage_func=None, liaison_positions=None):
    """Get a list of dictionaries for each phonological group on a line
    and joins the syllables to create phonological groups (pronounced together)
    according to a type of liaison, either synaloepha or sinaeresis

    :param word_syllables: List of dictionaries for each word of the line
    :param liaison_type: Which liaison is going to be performed synalepha or
        sinaeresis
    :param breakage_func: Function to decide when not to break a liaison that is
        specified in liaison_positions
    :param liaison_positions: Positions of the liaisons
    :return: A list of conjoined syllables
    :rtype: list
    """
    syllables = word_syllables[:]
    # e.g. "has_synalepha" / "has_sinaeresis"
    liaison_property = f"has_{liaison_type}"
    # Default liaison positions: 1 wherever a syllable already carries the
    # liaison flag, 0 elsewhere.
    if liaison_positions is None:
        liaison_positions = [int(syllable.get(liaison_property, 0))
                             for syllable in syllables]
    skip_next = False
    # Repeatedly merge adjacent syllable pairs until no liaison remains.
    # Each pass builds a reduced syllable list and the matching (shorter)
    # position list for the next iteration.
    while sum(liaison_positions) > 0:
        liaison_index = []
        reduced_syllables = []
        for idx, syllable in enumerate(syllables):
            # The previous iteration merged this syllable into its
            # predecessor; don't emit it again.
            if skip_next:
                skip_next = False
                continue
            breakage = False
            if idx < len(syllables) - 1:
                next_syllable = syllables[idx + 1]
                # breakage_func can veto a liaison marked in the positions.
                breakage = (
                    breakage_func is not None
                    and breakage_func(liaison_type, syllable, next_syllable)
                )
            if liaison_positions[idx] and not breakage:
                # Record where inside the merged text the boundary fell,
                # accumulating indices across successive merges.
                boundary_index = syllable.get(f'{liaison_type}_index', [])
                boundary_index.append(len(syllable.get('syllable')) - 1)
                # The merged group is stressed if either half is stressed.
                liaison = {
                    'syllable': (syllable["syllable"]
                                 + next_syllable["syllable"]),
                    'is_stressed': (syllable["is_stressed"]
                                    or next_syllable["is_stressed"]),
                    f'{liaison_type}_index': boundary_index,
                }
                # Propagate the liaison flag and word-end marker from the
                # second half, when present.
                for prop in (liaison_property, "is_word_end"):
                    has_prop = next_syllable.get(prop, None)
                    if has_prop is not None:
                        liaison[prop] = has_prop
                reduced_syllables.append(liaison)
                # The merged group inherits the next syllable's position bit.
                liaison_index.append(liaison_positions[idx + 1])
                skip_next = True
            else:
                reduced_syllables.append(syllable)
                liaison_index.append(0)
        liaison_positions = liaison_index
        syllables = reduced_syllables
    # Normalize the liaison flags against the final position list.
    return clean_phonological_groups(
        syllables, liaison_positions, liaison_property
    )
def clean_phonological_groups(groups, liaison_positions, liaison_property):
    """Clean phonological groups so their liaison property is consistently set
    according to the the liaison positions

    :param groups: Phonological groups to be cleaned
    :param liaison_positions: Positions of the liaisons
    :param liaison_property: The liaison type (synaeresis or synalepha)
    :return: Cleaned phonological groups
    :rtype: list
    """
    # Groups without the property are passed through untouched; groups that
    # have it get a fresh copy with the flag synced to its position bit.
    return [
        {**group, liaison_property: bool(liaison_positions[idx])}
        if liaison_property in group else group
        for idx, group in enumerate(groups)
    ]
def get_rhythmical_pattern(phonological_groups, rhythm_format="pattern"):
    """Gets a rhythm pattern for a poem in either "pattern": "-++-+-+-"
    "binary": "01101010" or "indexed": [1,2,4,6] format

    :param phonological_groups: a dictionary with the syllables of the line
    :param rhythm_format: The output format for the rhythm
    :return: Dictionary with with rhythm and phonologic groups
    :rtype: dict
    """
    stress_marks = get_stresses(phonological_groups)
    return {
        "stress": format_stress(stress_marks, rhythm_format),
        "type": rhythm_format,
        "length": len(stress_marks),
    }
def get_stresses(phonological_groups):
    """Gets a list of stress marks (`True` for stressed, `False` for unstressed)
    from a list of phonological groups applying rules depending on the ending
    stress.

    :param phonological_groups: a dictionary with the phonological groups
        (syllables) of the line
    :return: List of boolean values indicating whether a group is
        stressed (`True`) or not (`False`)
    :rtype: list
    """
    stresses = [group["is_stressed"] for group in phonological_groups]
    word_ends = [group.get("is_word_end", False)
                 for group in phonological_groups]
    # Negative index of the last syllable of the penultimate word, if the
    # line contains at least two word boundaries.
    if word_ends.count(True) > 1:
        end_positions = [pos for pos, flag in enumerate(word_ends[::-1])
                         if flag]
        penultimate_word = -(end_positions[1] + 1)
    else:
        penultimate_word = None
    # Negative index of the last stressed syllable.
    last_stress = -(stresses[::-1].index(True) + 1)
    if last_stress == -1:
        # Oxytone (aguda): count one extra metrical syllable.
        stresses.append(False)
    elif last_stress <= -3:
        # Proparoxytone or beyond (esdrújula / sobreesdrújula): drop one
        # syllable, unless the stress falls before the penultimate word's
        # final syllable.
        if penultimate_word is None or last_stress > penultimate_word:
            stresses.pop()
    return stresses
def format_stress(stresses, rhythm_format="pattern", indexed_separator="-"):
    """Converts a list of boolean elements into a string that matches the chosen
    rhythm format:

    "indexed": 2,5,8
    "pattern": -++--+-+-
    "binary": 01101001

    :param stresses: List of boolean elements representing stressed syllables
    :param rhythm_format: Format to be used: indexed, pattern, or binary
    :param indexed_separator: String to use as a separator for indexed pattern
    :return: String with the stress pattern
    :rtype: str
    """
    if rhythm_format == 'indexed':
        # 1-based positions of the stressed syllables only.
        positions = [str(pos + 1) for pos, stressed in enumerate(stresses)
                     if stressed]
        return indexed_separator.join(positions)
    if rhythm_format == 'binary':
        return "".join("1" if stressed else "0" for stressed in stresses)
    # Default: "pattern" notation.
    return "".join("+" if stressed else "-" for stressed in stresses)
"""
Syllabifier functions
"""
def apply_exception_rules(word):
    """Applies presyllabification rules to a word,
    based on <NAME>'s work

    :param word: A string to be checked for exceptions
    :return: A string with the presyllabified word
    :rtype: str
    """
    # Each rule is applied in this fixed order; whenever a pattern matches
    # at the start of the (possibly already hyphenated) word, the word is
    # rebuilt by joining the pattern's groups with hyphens.
    presyllabification_rules = (
        W_VOWEL_GROUP,                  # vowel + w + vowel group
        CONSONANT_GROUP,                # consonant groups
        CONSONANT_GROUP_EXCEPTION_LL,   # exception for LL
        CONSONANT_GROUP_EXCEPTION_DL,   # exception for DL
        PREFIX_SIN_WITH_CONSONANT_RE,   # prefix 'sin' followed by consonant
        PREFIX_DES_WITH_CONSONANT_RE,   # prefix 'des' followed by consonant
    )
    for rule in presyllabification_rules:
        # match() anchors at the start; search() then extracts the groups.
        if rule.match(word):
            found = rule.search(word)
            if found is not None:
                word = "-".join(found.groups())
    return word
def apply_exception_rules_post(word):
    """Applies presyllabification rules to a word,
    based on <NAME>'s work

    :param word: A string to be checked for exceptions
    :return: A string with the presyllabified word with hyphens
    :rtype: str
    """
    # We make one pass for every match found so we can perform
    # several substitutions
    matches = HIATUS_FIRST_VOWEL_RE.findall(word)
    if matches:
        # NOTE(review): iterating over matches[0] runs the substitution once
        # per element of the first match (its characters, or its groups if
        # the pattern has several) — presumably an upper bound on the number
        # of passes needed; confirm against HIATUS_FIRST_VOWEL_RE.
        for _ in matches[0]:
            # Insert a hiatus break between the second and third groups.
            word = re.sub(HIATUS_FIRST_VOWEL_RE, r'\1\2-\3', word)
    regexes = (CONSONANT_CLUSTER_RE, LOWERING_DIPHTHONGS_WITH_H,
               RAISING_DIPHTHONGS_WITH_H)
    for regex in regexes:
        matches = regex.findall(word)
        if matches:
            # Same bounded-passes idiom as above; these rules rejoin the
            # three groups without a hyphen (removing a break).
            for _ in matches[0]:
                word = re.sub(regex, r'\1\2\3', word)
    return word
def syllabify(word, alternative_syllabification=False):
    """Syllabifies a word.
    :param word: The word to be syllabified.
    :param alternative_syllabification: Whether or not the alternative
        syllabification is used
    :return: List of syllables and exceptions where appropriate.
    :rtype: list
    """
    output = ""
    original_word = word
    # Checks if word exists on the foreign words dictionary
    if word in SYLLABIFICATOR_FOREIGN_WORDS_DICT:
        output = SYLLABIFICATOR_FOREIGN_WORDS_DICT[word]
    else:
        word = apply_exception_rules(word)
        # Consume the word one character at a time, copying each char to
        # the output and inserting a hyphen after it whenever the regex
        # recognizes a syllable boundary at the current position.
        while len(word) > 0:
            output += word[0]
            # Returns first matching pattern.
            m = letter_clusters_re.search(word)
            if m is not None:
                # Adds hyphen to syllables if regex pattern is not 5, 8, 11
                output += "-" if m.lastindex not in {5, 8, 11} else ""
            word = word[1:]
        output = apply_exception_rules_post(output)
    # Remove empty elements created during syllabification
    output = list(filter(bool, output.split("-")))
    if (alternative_syllabification
            and original_word.lower() in ALTERNATIVE_SYLLABIFICATION):
        return ALTERNATIVE_SYLLABIFICATION[original_word.lower()][1][0]
    else:
        # NOTE(review): this lookup uses original_word *unlowered*, unlike
        # the branch above which lowercases it — confirm whether .lower()
        # was intended here as well.
        return (output,
                ALTERNATIVE_SYLLABIFICATION.get(original_word, (None, ()))[1])
def get_orthographic_accent(syllable_list):
"""Given a list of str representing syllables,
return position in the list of a syllable bearing
orthographic stress (with the acute accent mark in Spanish)
:param syllable_list: list of syllables as str or unicode each
:return: Position | |
<reponame>bio-boris/ExpressionUtils<gh_stars>0
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import sys
import time
import shutil
import glob
import logging
from datetime import datetime
from pprint import pprint
from pprint import pformat
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.baseclient import ServerError as DFUError
from installed_clients.WorkspaceClient import Workspace
from installed_clients.baseclient import ServerError as WorkspaceError
from installed_clients.ReadsAlignmentUtilsClient import ReadsAlignmentUtils
from core.expression_utils import ExpressionUtils as Expression_Utils
from core.table_maker import TableMaker
from core.exprMatrix_utils import ExprMatrixUtils
#END_HEADER
class ExpressionUtils:
'''
Module Name:
ExpressionUtils
Module Description:
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.1.2"
GIT_URL = "<EMAIL>:sean-mccorkle/ExpressionUtils.git"
GIT_COMMIT_HASH = "d18d7ff875c006e2b6cb6a3888a3e8d507e3269d"
#BEGIN_CLASS_HEADER
PARAM_IN_SRC_DIR = 'source_dir'
PARAM_IN_SRC_REF = 'source_ref'
PARAM_IN_DST_REF = 'destination_ref'
PARAM_IN_ALIGNMENT_REF = 'alignment_ref'
PARAM_IN_GENOME_REF = 'genome_ref'
PARAM_IN_ANNOTATION_ID = 'annotation_id'
PARAM_IN_BAM_FILE_PATH = 'bam_file_path'
PARAM_IN_DESCRIPTION = 'description'
PARAM_IN_DATA_QUAL_LEVEL = 'data_quality_level'
PARAM_IN_PROC_COMMENTS = 'processing_comments'
PARAM_IN_PLATFORM = 'platform'
PARAM_IN_MAPPED_SAMPLE_ID = 'mapped_sample_id'
PARAM_IN_ORIG_MEDIAN = 'original_median'
PARAM_IN_EXT_SRC_DATE = 'external_source_date'
PARAM_IN_TRANSCRIPTS = 'transcripts'
PARAM_IN_SRC = 'source'
def _check_required_param(self, in_params, param_list):
"""
Check if each of the params in the list are in the input params
"""
for param in param_list:
if (param not in in_params or not in_params[param]):
raise ValueError('{} parameter is required'.format(param))
def _proc_ws_obj_params(self, ctx, params):
    """
    Check the validity of workspace and object params and return them.

    Splits ``destination_ref`` ("ws_name_or_id/obj_name_or_id") into its
    workspace and object parts and resolves the workspace name to a
    numeric id via DataFileUtil.

    :param ctx: request context object (unused here)
    :param params: dict expected to contain ``destination_ref``
    :return: tuple of (workspace id, object name or id)
    :raises ValueError: if either half of the ref is missing, or the
        workspace name cannot be resolved
    """
    dst_ref = params.get(self.PARAM_IN_DST_REF)
    # os.path.split cleaves on the last '/': "ws/obj" -> ("ws", "obj").
    ws_name_id, obj_name_id = os.path.split(dst_ref)
    if not bool(ws_name_id.strip()) or ws_name_id == '/':
        raise ValueError("Workspace name or id is required in " + self.PARAM_IN_DST_REF)
    if not bool(obj_name_id.strip()):
        raise ValueError("Object name or id is required in " + self.PARAM_IN_DST_REF)
    # NOTE(review): os.path.split always yields strings, so this isinstance
    # check is always True and the name->id lookup always runs — presumably
    # DataFileUtil tolerates numeric-id strings; confirm whether a
    # str.isdigit() check was intended instead.
    if not isinstance(ws_name_id, int):
        try:
            ws_name_id = self.dfu.ws_name_to_id(ws_name_id)
        except DFUError as se:
            # Surface only the first sentence of the server error.
            prefix = se.message.split('.')[0]
            raise ValueError(prefix)
    self.__LOGGER.info('Obtained workspace name/id ' + str(ws_name_id))
    return ws_name_id, obj_name_id
def _proc_upload_expression_params(self, ctx, params):
    """Validate upload_expression parameters.

    Ensures the required parameters are present, resolves the destination
    ref to a workspace id and object name, and verifies that the source
    directory exists and is non-empty.

    :return: tuple of (workspace id, object name or id, source directory)
    :raises ValueError: on any missing or invalid parameter
    """
    required = [
        self.PARAM_IN_DST_REF,
        self.PARAM_IN_SRC_DIR,
        self.PARAM_IN_ALIGNMENT_REF,
    ]
    self._check_required_param(params, required)
    ws_name_id, obj_name_id = self._proc_ws_obj_params(ctx, params)
    source_dir = params.get(self.PARAM_IN_SRC_DIR)
    if not os.path.isdir(source_dir):
        raise ValueError('Source directory does not exist: ' + source_dir)
    if not os.listdir(source_dir):
        raise ValueError('Source directory is empty: ' + source_dir)
    return ws_name_id, obj_name_id, source_dir
def _get_ws_info(self, obj_ref):
    """Fetch workspace object info for *obj_ref*.

    Logs and re-raises any workspace server error.
    """
    workspace = Workspace(self.ws_url)
    try:
        return workspace.get_object_info_new({'objects': [{'ref': obj_ref}]})[0]
    except WorkspaceError as wse:
        self.__LOGGER.error('Logging workspace exception')
        self.__LOGGER.error(str(wse))
        raise
def _get_genome_ref(self, assembly_or_genome_ref, params):
if self.PARAM_IN_GENOME_REF in params and params[self.PARAM_IN_GENOME_REF] is not None:
return params[self.PARAM_IN_GENOME_REF]
obj_type = self._get_ws_info(assembly_or_genome_ref)[2]
if obj_type.startswith('KBaseGenomes.Genome'):
return assembly_or_genome_ref
raise ValueError('Alignment object does not contain genome_ref; '
'"{}" parameter is required'.format(self.PARAM_IN_GENOME_REF))
def _get_expression_levels(self, source_dir, genome_ref, transcripts=False):
fpkm_file_path = os.path.join(source_dir, 'genes.fpkm_tracking')
if transcripts:
fpkm_file_path = os.path.join(source_dir, 't_data.ctab')
if not os.path.isfile(fpkm_file_path):
raise ValueError('{} file is required'.format(fpkm_file_path))
id_col = 5 if transcripts else 0
self.__LOGGER.info('Generating expression levels from {}'
.format(fpkm_file_path))
return self.expression_utils.get_expression_levels(fpkm_file_path,
genome_ref, id_col)
def _gen_ctab_files(self, params, alignment_ref):
source_dir = params.get(self.PARAM_IN_SRC_DIR)
if len(glob.glob(source_dir + '/*.ctab')) < 5:
self.__LOGGER.info(' ======= Generating ctab files ==========')
gtf_file = os.path.join(source_dir, 'transcripts.gtf')
if not os.path.isfile(gtf_file):
raise ValueError("{} file is required to generate ctab files, found missing".
format(gtf_file))
if self.PARAM_IN_BAM_FILE_PATH in params and \
params[self.PARAM_IN_BAM_FILE_PATH] is not None:
bam_file_path = params[self.PARAM_IN_BAM_FILE_PATH]
else:
self.__LOGGER.info('Downloading bam file from alignment object')
rau = ReadsAlignmentUtils(self.callback_url)
alignment_retVal = rau.download_alignment({'source_ref': alignment_ref})
alignment_dir = alignment_retVal.get('destination_dir')
allbamfiles = glob.glob(alignment_dir + '/*.bam')
if len(allbamfiles) == 0:
raise ValueError('bam file does not exist in {}'.format(d))
elif len(allbamfiles) == 1:
bam_file_path = allbamfiles[0]
elif len(allbamfiles) > 1:
tmp_file_path = os.path.join(alignment_dir, 'accepted_hits.bam')
if os.path.isfile(tmp_file_path):
bam_file_path = tmp_file_path
else:
tmp_file_path = os.path.join(alignment_dir, 'accepted_hits_sorted.bam')
if os.path.isfile(tmp_file_path):
bam_file_path = tmp_file_path
else:
raise ValueError('accepted_hits.bam, accepted_hits_sorted.bam or other bam file not found in {}'.
format(alignment_dir))
result = self.table_maker.build_ctab_files(
ref_genome_path=gtf_file,
alignment_path=bam_file_path,
output_dir=source_dir)
if result != 0:
raise ValueError('Tablemaker failed')
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
    #BEGIN_CONSTRUCTOR
    # Dedicated module logger writing to stdout with UTC timestamps.
    self.__LOGGER = logging.getLogger('ExpressionUtils')
    self.__LOGGER.setLevel(logging.INFO)
    streamHandler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        "%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
    # Emit log timestamps in UTC rather than local time.
    formatter.converter = time.gmtime
    streamHandler.setFormatter(formatter)
    self.__LOGGER.addHandler(streamHandler)
    self.__LOGGER.info("Logger was set")
    self.config = config
    self.scratch = config['scratch']
    # SDK_CALLBACK_URL must be set in the environment; KeyError otherwise.
    self.callback_url = os.environ['SDK_CALLBACK_URL']
    self.ws_url = config['workspace-url']
    self.config['SDK_CALLBACK_URL'] = self.callback_url
    # Helper components used by the upload/download/matrix methods below.
    self.expression_utils = Expression_Utils(self.config)
    self.dfu = DataFileUtil(self.callback_url)
    self.table_maker = TableMaker(config, self.__LOGGER)
    self.expr_matrix_utils = ExprMatrixUtils(config, self.__LOGGER)
    #END_CONSTRUCTOR
    pass
def upload_expression(self, ctx, params):
    """
    Uploads the expression *
    :param params: instance of type "UploadExpressionParams" (*
       Required input parameters for uploading a reads expression data
       string   destination_ref        -   object reference of expression
       data. The object ref is 'ws_name_or_id/obj_name_or_id' where
       ws_name_or_id is the workspace name or id and obj_name_or_id is
       the object name or id    string   source_dir             -
       directory with the files to be uploaded string   alignment_ref
       -   alignment workspace object reference *) -> structure:
       parameter "destination_ref" of String, parameter "source_dir" of
       String, parameter "alignment_ref" of String, parameter
       "genome_ref" of String, parameter "annotation_id" of String,
       parameter "bam_file_path" of String, parameter "transcripts" of
       type "boolean" (A boolean - 0 for false, 1 for true. @range (0,
       1)), parameter "data_quality_level" of Long, parameter
       "original_median" of Double, parameter "description" of String,
       parameter "platform" of String, parameter "source" of String,
       parameter "external_source_date" of String, parameter
       "processing_comments" of String
    :returns: instance of type "UploadExpressionOutput" (*     Output
       from upload expression    *) -> structure: parameter "obj_ref" of
       String
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN upload_expression
    self.__LOGGER.info('Starting upload expression, parsing parameters ')
    pprint(params)
    # Validate params; resolve destination workspace id / object name and
    # check the source directory exists and is non-empty.
    ws_name_id, obj_name_id, source_dir = self._proc_upload_expression_params(ctx, params)
    alignment_ref = params.get(self.PARAM_IN_ALIGNMENT_REF)
    try:
        alignment_obj = self.dfu.get_objects({'object_refs': [alignment_ref]})['data'][0]
    except DFUError as e:
        self.__LOGGER.error('Logging stacktrace from workspace exception:\n' + e.data)
        raise
    alignment = alignment_obj['data']
    # The alignment's genome_id may be a Genome or an Assembly ref;
    # _get_genome_ref decides which genome ref to record on the object.
    assembly_or_genome_ref = alignment['genome_id']
    genome_ref = self._get_genome_ref(assembly_or_genome_ref, params)
    # Parse FPKM/TPM levels from the tracking file in source_dir.
    expression_levels, tpm_expression_levels = self._get_expression_levels(
        source_dir, genome_ref, params.get(self.PARAM_IN_TRANSCRIPTS))
    # Ensure the ctab files exist (regenerating them if needed).
    self._gen_ctab_files(params, alignment_ref)
    # Zip the whole source directory into shock; keep the file handle.
    uploaded_file = self.dfu.file_to_shock({'file_path': source_dir,
                                            'make_handle': 1,
                                            'pack': 'zip'
                                            })
    """
    move the zipfile created in the source directory one level up
    """
    path, dir = os.path.split(source_dir)
    zipfile = dir + '.zip'
    if os.path.isfile(os.path.join(source_dir, zipfile)):
        shutil.move(os.path.join(source_dir, zipfile), os.path.join(path, zipfile))
    file_handle = uploaded_file['handle']
    # NOTE(review): file_size is captured but never used below.
    file_size = uploaded_file['size']
    # Assemble the KBaseRNASeq.RNASeqExpression object data.
    expression_data = {
        'numerical_interpretation': 'FPKM',
        'genome_id': genome_ref,
        'mapped_rnaseq_alignment': {alignment['read_sample_id']: alignment_ref},
        'condition': alignment['condition'],
        'file': file_handle,
        'expression_levels': expression_levels,
        'tpm_expression_levels': tpm_expression_levels
    }
    # Copy any supplied optional metadata straight onto the object.
    additional_params = [self.PARAM_IN_ANNOTATION_ID,
                         self.PARAM_IN_DESCRIPTION,
                         self.PARAM_IN_DATA_QUAL_LEVEL,
                         self.PARAM_IN_PLATFORM,
                         self.PARAM_IN_PROC_COMMENTS,
                         self.PARAM_IN_MAPPED_SAMPLE_ID,
                         self.PARAM_IN_ORIG_MEDIAN,
                         self.PARAM_IN_EXT_SRC_DATE,
                         self.PARAM_IN_SRC
                         ]
    for opt_param in additional_params:
        if opt_param in params and params[opt_param] is not None:
            expression_data[opt_param] = params[opt_param]
    # Record the alignment (and, when given, the genome) as provenance.
    extra_provenance_input_refs = list()
    extra_provenance_input_refs.append(params.get(self.PARAM_IN_ALIGNMENT_REF))
    if self.PARAM_IN_GENOME_REF in params and params.get(self.PARAM_IN_GENOME_REF) is not None:
        extra_provenance_input_refs.append(params.get(self.PARAM_IN_GENOME_REF))
    self.__LOGGER.info('=========== Adding extra_provenance_refs')
    self.__LOGGER.info(str(extra_provenance_input_refs))
    self.__LOGGER.info('==========================================')
    res = self.dfu.save_objects(
        {"id": ws_name_id,
         "objects": [{
             "type": "KBaseRNASeq.RNASeqExpression",
             "data": expression_data,
             "name": obj_name_id,
             "extra_provenance_input_refs": extra_provenance_input_refs
         }
         ]})[0]
    self.__LOGGER.info('save complete')
    # Build the "wsid/objid/version" ref from the object info tuple.
    returnVal = {'obj_ref': str(res[6]) + '/' + str(res[0]) + '/' + str(res[4])}
    self.__LOGGER.info('Uploaded object: ')
    print(returnVal)
    #END upload_expression
    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method upload_expression return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def download_expression(self, ctx, params):
"""
Downloads expression *
:param params: instance of type "DownloadExpressionParams" (*
Required input parameters for downloading expression string
source_ref - object reference of expression source.
The object ref is 'ws_name_or_id/obj_name_or_id' where
ws_name_or_id is the workspace name or id and obj_name_or_id is
the object name or id *) -> structure: parameter "source_ref" of
String
:returns: instance of type "DownloadExpressionOutput" (* The output
of the download method. *) -> structure: parameter
"destination_dir" of String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN download_expression
self.__LOGGER.info('Running download_expression with params:\n' +
pformat(params))
inref = params.get(self.PARAM_IN_SRC_REF)
if not inref:
raise ValueError(self.PARAM_IN_SRC_REF + ' parameter is required')
try:
expression = self.dfu.get_objects({'object_refs': [inref]})['data']
except DFUError as e:
self.__LOGGER.error('Logging stacktrace from workspace exception:\n' + e.data)
raise
# set the output dir
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
output_dir = os.path.join(self.scratch, 'download_' + str(timestamp))
os.mkdir(output_dir)
file_ret = self.dfu.shock_to_file({
'shock_id': expression[0]['data']['file']['id'],
'file_path': output_dir,
'unpack': 'unpack'
})
if not os.listdir(output_dir):
raise ValueError('No files were downloaded: ' + output_dir)
for f in glob.glob(output_dir + '/*.zip'):
os.remove(f)
returnVal = {'destination_dir': output_dir}
#END download_expression
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method download_expression return value ' +
'returnVal is not type dict as required.')
# | |
<filename>Tugas EAS/Soal1.py
# EVALUASI AKHR SEMESTER GENAP 2019
# Membuat simulasi Program Kontrol Ruangan Informatika mulai Q301 sd Q307
# Agar nyaman dan efisien penggunakan daya listrik, keawetan sarana
# Menampilkan ruang Q301 sd Q307 dengan status & Keterangan sebagi contoh berikut;
# Q301: AC Nyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Kuliah
# Q302: AC Nyala, Lampu Mati, LCD Mati, Ada Orang, Kuliah Kosong
# Q303: AC Mati, Lampu Mati, LCD Mati, Kosong, Kuliah Kosong
# dll
# EAS Komputasi Paralel Klas:S =======================
# NBI: 1461600045 Nama: <NAME>
#-----------------------------------------------------
import time
from threading import Thread
from random import randint
from queue import Queue
# Mengukur Suhu Ruangan yang ber-AC
# simulasi int antara 18 sd 40, untuk ruang kelas dijaga 25-28 derajat Celcius
def BacaSuhuRuang(output_queu):
    """Simulate the room-temperature sensors for rooms Q301-Q307.

    Once per second, puts a message tagged "suhuRuang" on *output_queu*
    containing a random reading of 18-40 (degrees C) for each room.
    Runs forever; intended to be started as a thread.
    """
    rooms = ('Q301', 'Q302', 'Q303', 'Q304', 'Q305', 'Q306', 'Q307')
    while True:
        message = {
            0: "suhuRuang",
            1: [[room, randint(18, 40)] for room in rooms],
        }
        output_queu.put(message)
        time.sleep(1)
# Mengukur keberadaan manusia dengan Sensor Suhu Tubuh manusia
# simulasi int antara 18 sd 40, untuk manusia normal suhunya 33,2-38,2°C
def BacaSuhuTubuh(output_queu):
    """Simulate the body-heat (occupancy) sensors for rooms Q301-Q307.

    Once per second, puts a message tagged "suhuTubuh" on *output_queu*
    with a random reading of 18-40 per room (normal human body
    temperature being roughly 33.2-38.2 C). Runs forever.
    """
    rooms = ('Q301', 'Q302', 'Q303', 'Q304', 'Q305', 'Q306', 'Q307')
    while True:
        message = {
            0: "suhuTubuh",
            1: [[room, randint(18, 40)] for room in rooms],
        }
        output_queu.put(message)
        time.sleep(1)
# Mengukur cahaya (Luminasi dg satuan Lux)
# simulasi int antara 1 sd 10000, untuk kantor/ruang kelas dijaga 320 - 500 lux
def BacaLuminasiRuang(output_queu):
    """Simulate the luminosity sensors for rooms Q301-Q307.

    Once per second, puts a message tagged "luminasiRuang" on
    *output_queu* with a random reading of 1-10000 lux per room
    (classrooms target 320-500 lux). Runs forever.
    """
    rooms = ('Q301', 'Q302', 'Q303', 'Q304', 'Q305', 'Q306', 'Q307')
    while True:
        message = {
            0: "luminasiRuang",
            1: [[room, randint(1, 10000)] for room in rooms],
        }
        output_queu.put(message)
        time.sleep(1)
def BacaJamRuang(output_queu):
    """Simulate the per-room clock for rooms Q301-Q307.

    Once per second, puts a message tagged "jamRuang" on *output_queu*
    with a random hour of 1-24 per room. Runs forever.
    """
    rooms = ('Q301', 'Q302', 'Q303', 'Q304', 'Q305', 'Q306', 'Q307')
    while True:
        message = {
            0: "jamRuang",
            1: [[room, randint(1, 24)] for room in rooms],
        }
        output_queu.put(message)
        time.sleep(1)
#Aturan dan Data Aktifitas Kantor adalah sbb;
# Sumulasi jam akan berjalan dari 1 sd 24.
# Q307 beroperasi sesuai jam kantor Senin sd Jumat pk.7.00 sd 21.00
# Setiap ada jadwal kuliah LCD pasti menyala, dan dimatikan selesai kuliah
# Lampu kelas dan AC akan dimatikan jika tidak ada mahasiswa didalamnya
# Q301 sd Q306 digunakan sesuai Jadwal kuliah dengan efisien dg simulasi sbb
# Jadwal=[['Q307',7,21], ['Q302',7,9,1,1], ['Q305',8,11,1,0], ['Q306',10,12,1,0], ['Q303',11,13,1,1]]
# Contoh Q305: ada jadwal kuliah pk 8 sd 11, 1:ada mhs, Kuliah kosong
# Mahasiswa Informatika sangat antusias dan penuh semangat sehingga setiap ada ruang selalu
# digunakan untuk kegiatan akademis seperti keminitas Pytho, DSI, Robot, C++, dll sehingga mereka #memiliki jadwal sendiri-sendiri yang terintegrasi.
# Dalam hal ini disimulasikan sbb;
# JadwalMhs=[ ['Q305',11,13,1], ['Q304',10,13,0] ]
# Contoh: diruang Q304 mulai pk 10 sd 13 rencana ada kegiatan mhs akan tetapi tidak ada mhsnya
# Simulated wall-clock hour (1-24), shared with MasterControl below.
jam = 1
# Latest reading per sensor type, indexed as
# [suhuRuang, suhuTubuh, luminasiRuang, jamRuang];
# None marks a slot that has not reported yet.
data = [None, None, None, None]
def MasterControl(input_queu):
while True:
dt = input_queu.get()
sensor = dt[0]
value = dt[1]
if sensor=="suhuRuang":
setData(0, value)
elif sensor=="suhuTubuh":
setData(1, value)
elif sensor=="luminasiRuang":
setData(2, value)
elif sensor=="jamRuang":
setData(3, value)
global data, jam
if None in data:
continue
print(" ")
print("## Jam ", jam, " ##", sep="")
print(" ")
for i in range(len(data[0])):
suhuRuang = data[0][i]
suhuTubuh = data[1][i]
luminasiRuang = data[2][i]
jamRuang = data[3][i]
ruang = "Q30"+str(i+1)
# adaKuliah = False
# for j in range(len(jadwal)):
if ruang == "Q302" and jamRuang == 7 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif ruang == "Q302" and jamRuang == 8 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif ruang == "Q302" and jamRuang == 9 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q302 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q302 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Ada Kuliah.")
elif ruang == "Q303" and jamRuang == 11 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif ruang == "Q303" and jamRuang == 12 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif ruang == "Q303" and jamRuang == 13 :
if suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Mati, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang >= 28 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Mati, Lampu Mati, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang >= 500 :
print ("Q303 : ", " AC Menyala, Lampu Nyala, LCD Nyala, Ada Orang, Ada Mahasiswa, Kosong.")
elif suhuRuang < 25 and suhuTubuh >= 38.2 and luminasiRuang < 320 :
print ("Q303 : ", " AC Menyala, Lampu Mati, LCD Nyala, Ada | |
'514194172':{'en': 'Claro'},
'514194173':{'en': 'Claro'},
'514194188':{'en': 'Movistar'},
'514194189':{'en': 'Movistar'},
'51419419':{'en': 'Movistar'},
'514294200':{'en': 'Movistar'},
'514294201':{'en': 'Movistar'},
'514294202':{'en': 'Movistar'},
'51429424':{'en': 'Movistar'},
'51429426':{'en': 'Movistar'},
'51429427':{'en': 'Claro'},
'51429429':{'en': 'Movistar'},
'51439430':{'en': 'Movistar'},
'51439431':{'en': 'Movistar'},
'514394320':{'en': 'Movistar'},
'514394321':{'en': 'Movistar'},
'514394322':{'en': 'Movistar'},
'514394323':{'en': 'Movistar'},
'514394324':{'en': 'Movistar'},
'514394330':{'en': 'Claro'},
'514394331':{'en': 'Claro'},
'514394332':{'en': 'Claro'},
'514394333':{'en': 'Claro'},
'514394334':{'en': 'Claro'},
'514394335':{'en': 'Claro'},
'51439434':{'en': 'Movistar'},
'51439435':{'en': 'Claro'},
'51439436':{'en': 'Movistar'},
'51439437':{'en': 'Claro'},
'514394378':{'en': 'Movistar'},
'514394379':{'en': 'Movistar'},
'51439438':{'en': 'Movistar'},
'514494801':{'en': 'Movistar'},
'514494802':{'en': 'Movistar'},
'514494803':{'en': 'Movistar'},
'514494804':{'en': 'Movistar'},
'514494805':{'en': 'Claro'},
'514494806':{'en': 'Claro'},
'514494807':{'en': 'Claro'},
'514494808':{'en': 'Claro'},
'514494809':{'en': 'Claro'},
'514494810':{'en': 'Movistar'},
'514494811':{'en': 'Movistar'},
'514494812':{'en': 'Movistar'},
'514494813':{'en': 'Movistar'},
'514494814':{'en': 'Movistar'},
'514494815':{'en': 'Movistar'},
'514494816':{'en': 'Movistar'},
'51449482':{'en': 'Claro'},
'51449483':{'en': 'Claro'},
'51449484':{'en': 'Movistar'},
'51449485':{'en': 'Movistar'},
'51449486':{'en': 'Movistar'},
'51449488':{'en': 'Movistar'},
'51449489':{'en': 'Movistar'},
'51449490':{'en': 'Movistar'},
'514494907':{'en': 'Claro'},
'514494908':{'en': 'Claro'},
'514494909':{'en': 'Claro'},
'51449491':{'en': 'Claro'},
'51449492':{'en': 'Claro'},
'51449493':{'en': 'Claro'},
'51449494':{'en': 'Movistar'},
'514494950':{'en': 'Movistar'},
'514494951':{'en': 'Movistar'},
'514494952':{'en': 'Movistar'},
'514494953':{'en': 'Movistar'},
'514494954':{'en': 'Movistar'},
'514494955':{'en': 'Movistar'},
'51449496':{'en': 'Movistar'},
'51449497':{'en': 'Claro'},
'514494978':{'en': 'Movistar'},
'514494979':{'en': 'Movistar'},
'51449498':{'en': 'Movistar'},
'514494991':{'en': 'Movistar'},
'514494995':{'en': 'Movistar'},
'514494996':{'en': 'Movistar'},
'514494997':{'en': 'Movistar'},
'514494998':{'en': 'Movistar'},
'514494999':{'en': 'Movistar'},
'515195080':{'en': 'Movistar'},
'515195081':{'en': 'Movistar'},
'515195082':{'en': 'Movistar'},
'515195083':{'en': 'Movistar'},
'51519509':{'en': 'Movistar'},
'51519510':{'en': 'Movistar'},
'51519511':{'en': 'Claro'},
'51519512':{'en': 'Claro'},
'51519513':{'en': 'Claro'},
'51519514':{'en': 'Movistar'},
'515195150':{'en': 'Movistar'},
'515195151':{'en': 'Movistar'},
'515195152':{'en': 'Movistar'},
'515195153':{'en': 'Movistar'},
'515195154':{'en': 'Movistar'},
'515195155':{'en': 'Movistar'},
'515195156':{'en': 'Movistar'},
'515195157':{'en': 'Movistar'},
'515195158':{'en': 'Movistar'},
'51519516':{'en': 'Movistar'},
'51519517':{'en': 'Claro'},
'51519518':{'en': 'Movistar'},
'515195190':{'en': 'Movistar'},
'515195191':{'en': 'Movistar'},
'515195193':{'en': 'Movistar'},
'515195194':{'en': 'Movistar'},
'515195195':{'en': 'Movistar'},
'515195196':{'en': 'Movistar'},
'515195197':{'en': 'Movistar'},
'515195198':{'en': 'Movistar'},
'515195199':{'en': 'Movistar'},
'51529523':{'en': 'Claro'},
'515295250':{'en': 'Movistar'},
'515295251':{'en': 'Movistar'},
'515295252':{'en': 'Movistar'},
'515295253':{'en': 'Movistar'},
'515295254':{'en': 'Movistar'},
'51529526':{'en': 'Movistar'},
'515295270':{'en': 'Claro'},
'515295271':{'en': 'Claro'},
'51529528':{'en': 'Movistar'},
'515295292':{'en': 'Movistar'},
'515295293':{'en': 'Movistar'},
'515295294':{'en': 'Movistar'},
'515295295':{'en': 'Movistar'},
'515295296':{'en': 'Movistar'},
'515295297':{'en': 'Movistar'},
'515295298':{'en': 'Movistar'},
'515295299':{'en': 'Movistar'},
'515395350':{'en': 'Claro'},
'515395352':{'en': 'Movistar'},
'515395361':{'en': 'Movistar'},
'515395363':{'en': 'Movistar'},
'515395364':{'en': 'Movistar'},
'515395366':{'en': 'Movistar'},
'515395367':{'en': 'Movistar'},
'515395368':{'en': 'Movistar'},
'515395370':{'en': 'Claro'},
'515395371':{'en': 'Claro'},
'515395372':{'en': 'Claro'},
'515395373':{'en': 'Claro'},
'515395374':{'en': 'Claro'},
'515395375':{'en': 'Claro'},
'515395376':{'en': 'Claro'},
'51539539':{'en': 'Movistar'},
'515495800':{'en': 'Movistar'},
'515495801':{'en': 'Movistar'},
'515495802':{'en': 'Movistar'},
'515495803':{'en': 'Movistar'},
'515495804':{'en': 'Movistar'},
'515495805':{'en': 'Claro'},
'515495806':{'en': 'Claro'},
'515495807':{'en': 'Claro'},
'515495808':{'en': 'Claro'},
'515495809':{'en': 'Claro'},
'515495810':{'en': 'Claro'},
'515495811':{'en': 'Claro'},
'515495820':{'en': 'Claro'},
'515495821':{'en': 'Claro'},
'515495823':{'en': 'Claro'},
'515495824':{'en': 'Claro'},
'515495825':{'en': 'Claro'},
'515495826':{'en': 'Claro'},
'515495827':{'en': 'Claro'},
'515495828':{'en': 'Claro'},
'515495829':{'en': 'Claro'},
'51549583':{'en': 'Claro'},
'515495840':{'en': 'Movistar'},
'515495841':{'en': 'Movistar'},
'515495842':{'en': 'Movistar'},
'515495843':{'en': 'Movistar'},
'51549585':{'en': 'Movistar'},
'51549586':{'en': 'Movistar'},
'51549587':{'en': 'Claro'},
'51549588':{'en': 'Movistar'},
'515495890':{'en': 'Movistar'},
'515495891':{'en': 'Movistar'},
'515495892':{'en': 'Movistar'},
'515495893':{'en': 'Movistar'},
'515495894':{'en': 'Movistar'},
'515495895':{'en': 'Claro'},
'515495896':{'en': 'Claro'},
'515495898':{'en': 'Claro'},
'51549590':{'en': 'Movistar'},
'51549591':{'en': 'Claro'},
'51549592':{'en': 'Claro'},
'515495928':{'en': 'Movistar'},
'515495929':{'en': 'Movistar'},
'51549593':{'en': 'Claro'},
'51549594':{'en': 'Movistar'},
'515495950':{'en': 'Movistar'},
'515495951':{'en': 'Movistar'},
'515495952':{'en': 'Movistar'},
'515495953':{'en': 'Movistar'},
'515495954':{'en': 'Movistar'},
'515495955':{'en': 'Movistar'},
'515495956':{'en': 'Movistar'},
'515495957':{'en': 'Movistar'},
'515495958':{'en': 'Movistar'},
'51549596':{'en': 'Movistar'},
'51549597':{'en': 'Claro'},
'515495980':{'en': 'Movistar'},
'515495981':{'en': 'Movistar'},
'515495982':{'en': 'Movistar'},
'515495984':{'en': 'Movistar'},
'515495985':{'en': 'Movistar'},
'515495986':{'en': 'Movistar'},
'515495987':{'en': 'Movistar'},
'515495988':{'en': 'Movistar'},
'515495989':{'en': 'Movistar'},
'515495991':{'en': 'Movistar'},
'515495992':{'en': 'Movistar'},
'515495996':{'en': 'Movistar'},
'515495997':{'en': 'Movistar'},
'515495998':{'en': 'Movistar'},
'515495999':{'en': 'Movistar'},
'51569560':{'en': 'Movistar'},
'515695610':{'en': 'Movistar'},
'515695611':{'en': 'Movistar'},
'515695612':{'en': 'Movistar'},
'515695613':{'en': 'Movistar'},
'515695614':{'en': 'Movistar'},
'515695615':{'en': 'Movistar'},
'515695616':{'en': 'Movistar'},
'515695620':{'en': 'Claro'},
'515695621':{'en': 'Claro'},
'515695622':{'en': 'Claro'},
'515695623':{'en': 'Claro'},
'515695624':{'en': 'Claro'},
'515695625':{'en': 'Claro'},
'515695626':{'en': 'Claro'},
'51569563':{'en': 'Claro'},
'51569564':{'en': 'Movistar'},
'51569565':{'en': 'Movistar'},
'51569566':{'en': 'Movistar'},
'51569567':{'en': 'Claro'},
'51569568':{'en': 'Movistar'},
'51569569':{'en': 'Movistar'},
'516196150':{'en': 'Movistar'},
'516196151':{'en': 'Movistar'},
'516196152':{'en': 'Movistar'},
'516196153':{'en': 'Movistar'},
'516196154':{'en': 'Movistar'},
'516196155':{'en': 'Movistar'},
'51619616':{'en': 'Movistar'},
'516196170':{'en': 'Claro'},
'516196171':{'en': 'Claro'},
'516196172':{'en': 'Claro'},
'516196173':{'en': 'Claro'},
'516196174':{'en': 'Claro'},
'516196175':{'en': 'Claro'},
'51619619':{'en': 'Movistar'},
'51619627':{'en': 'Claro'},
'516296250':{'en': 'Movistar'},
'516296251':{'en': 'Movistar'},
'516296252':{'en': 'Movistar'},
'516296253':{'en': 'Movistar'},
'516296254':{'en': 'Movistar'},
'51629626':{'en': 'Movistar'},
'51629629':{'en': 'Movistar'},
'516396360':{'en': 'Movistar'},
'516396361':{'en': 'Movistar'},
'516396362':{'en': 'Movistar'},
'516396363':{'en': 'Movistar'},
'516396364':{'en': 'Movistar'},
'516396365':{'en': 'Movistar'},
'516396369':{'en': 'Movistar'},
'516396370':{'en': 'Claro'},
'516396371':{'en': 'Claro'},
'516396372':{'en': 'Claro'},
'516396373':{'en': 'Claro'},
'516396390':{'en': 'Movistar'},
'516396391':{'en': 'Movistar'},
'516396392':{'en': 'Movistar'},
'516396393':{'en': 'Movistar'},
'516396394':{'en': 'Movistar'},
'516396395':{'en': 'Movistar'},
'516396398':{'en': 'Movistar'},
'516396399':{'en': 'Movistar'},
'516495410':{'en': 'Claro'},
'516495411':{'en': 'Claro'},
'516495412':{'en': 'Claro'},
'516495440':{'en': 'Movistar'},
'516495441':{'en': 'Movistar'},
'516495442':{'en': 'Movistar'},
'516495443':{'en': 'Movistar'},
'516495444':{'en': 'Movistar'},
'516495445':{'en': 'Movistar'},
'516495446':{'en': 'Movistar'},
'516495447':{'en': 'Movistar'},
'516495448':{'en': 'Movistar'},
'51649640':{'en': 'Movistar'},
'51649641':{'en': 'Claro'},
'51649642':{'en': 'Claro'},
'51649643':{'en': 'Claro'},
'51649644':{'en': 'Movistar'},
'51649645':{'en': 'Movistar'},
'51649646':{'en': 'Movistar'},
'51649647':{'en': 'Movistar'},
'51649648':{'en': 'Movistar'},
'516496490':{'en': 'Movistar'},
'516496491':{'en': 'Movistar'},
'516496492':{'en': 'Movistar'},
'516496496':{'en': 'Movistar'},
'516496497':{'en': 'Movistar'},
'516496498':{'en': 'Movistar'},
'516496499':{'en': 'Movistar'},
'516596530':{'en': 'Claro'},
'516596531':{'en': 'Claro'},
'516596532':{'en': 'Claro'},
'516596533':{'en': 'Claro'},
'516596534':{'en': 'Claro'},
'51659656':{'en': 'Movistar'},
'51659657':{'en': 'Claro'},
'516596590':{'en': 'Movistar'},
'516596591':{'en': 'Movistar'},
'516596592':{'en': 'Movistar'},
'516596596':{'en': 'Movistar'},
'516596597':{'en': 'Movistar'},
'516596598':{'en': 'Movistar'},
'516596599':{'en': 'Movistar'},
'516696600':{'en': 'Movistar'},
'51669666':{'en': 'Movistar'},
'51669667':{'en': 'Claro'},
'51669668':{'en': 'Movistar'},
'51669669':{'en': 'Movistar'},
'51679674':{'en': 'Claro'},
'516796765':{'en': 'Movistar'},
'516796766':{'en': 'Movistar'},
'516796768':{'en': 'Movistar'},
'516796769':{'en': 'Movistar'},
'516796770':{'en': 'Claro'},
'516796771':{'en': 'Claro'},
'516796772':{'en': 'Claro'},
'516796773':{'en': 'Claro'},
'516796798':{'en': 'Movistar'},
'517297260':{'en': 'Movistar'},
'517297261':{'en': 'Movistar'},
'517297262':{'en': 'Movistar'},
'517297263':{'en': 'Movistar'},
'517297268':{'en': 'Movistar'},
'517297269':{'en': 'Movistar'},
'517297270':{'en': 'Claro'},
'517297271':{'en': 'Claro'},
'517297272':{'en': 'Claro'},
'517297273':{'en': 'Claro'},
'51729728':{'en': 'Movistar'},
'517297290':{'en': 'Movistar'},
'517297291':{'en': 'Movistar'},
'517297292':{'en': 'Movistar'},
'517297296':{'en': 'Movistar'},
'517297297':{'en': 'Movistar'},
'517297298':{'en': 'Movistar'},
'517396840':{'en': 'Movistar'},
'517396841':{'en': 'Movistar'},
'517396842':{'en': 'Movistar'},
'517396843':{'en': 'Movistar'},
'517396844':{'en': 'Movistar'},
'517396870':{'en': 'Claro'},
'517396871':{'en': 'Claro'},
'517396872':{'en': 'Claro'},
'517396873':{'en': 'Claro'},
'517396875':{'en': 'Claro'},
'517396876':{'en': 'Claro'},
'517396877':{'en': 'Claro'},
'517396878':{'en': 'Claro'},
'517396879':{'en': 'Claro'},
'51739688':{'en': 'Movistar'},
'51739689':{'en': 'Movistar'},
'51739690':{'en': 'Movistar'},
'51739691':{'en': 'Movistar'},
'51739692':{'en': 'Movistar'},
'51739694':{'en': 'Movistar'},
'517396950':{'en': 'Movistar'},
'517396951':{'en': 'Movistar'},
'517396952':{'en': 'Movistar'},
'517396953':{'en': 'Movistar'},
'517396954':{'en': 'Movistar'},
'517396955':{'en': 'Movistar'},
'517396956':{'en': 'Movistar'},
'517396957':{'en': 'Movistar'},
'51739696':{'en': 'Movistar'},
'51739698':{'en': 'Movistar'},
'517396990':{'en': 'Movistar'},
'517396991':{'en': 'Movistar'},
'517396994':{'en': 'Movistar'},
'517396995':{'en': 'Movistar'},
'517396996':{'en': 'Movistar'},
'517396997':{'en': 'Movistar'},
'517396998':{'en': 'Movistar'},
'517396999':{'en': 'Movistar'},
'517497840':{'en': 'Movistar'},
'517497841':{'en': 'Movistar'},
'517497842':{'en': 'Movistar'},
'517497843':{'en': 'Movistar'},
'517497870':{'en': 'Claro'},
'517497871':{'en': 'Claro'},
'517497872':{'en': 'Claro'},
'517497873':{'en': 'Claro'},
'517497874':{'en': 'Claro'},
'517497875':{'en': 'Claro'},
'517497876':{'en': 'Claro'},
'517497877':{'en': 'Claro'},
'51749788':{'en': 'Movistar'},
'51749789':{'en': 'Movistar'},
'51749790':{'en': 'Movistar'},
'51749791':{'en': 'Movistar'},
'51749792':{'en': 'Movistar'},
'51749793':{'en': 'Claro'},
'517497956':{'en': 'Movistar'},
'517497957':{'en': 'Movistar'},
'51749796':{'en': 'Movistar'},
'51749797':{'en': 'Claro'},
'51749798':{'en': 'Movistar'},
'517497990':{'en': 'Movistar'},
'517497991':{'en': 'Movistar'},
'517497992':{'en': 'Movistar'},
'517497993':{'en': 'Movistar'},
'517497996':{'en': 'Movistar'},
'517497997':{'en': 'Movistar'},
'517497998':{'en': 'Movistar'},
'517497999':{'en': 'Movistar'},
'517697600':{'en': 'Movistar'},
'517697601':{'en': 'Movistar'},
'517697602':{'en': 'Movistar'},
'517697603':{'en': 'Movistar'},
'517697604':{'en': 'Movistar'},
'517697605':{'en': 'Movistar'},
'517697606':{'en': 'Movistar'},
'517697621':{'en': 'Claro'},
'517697622':{'en': 'Claro'},
'51769763':{'en': 'Claro'},
'51769764':{'en': 'Movistar'},
'517697650':{'en': 'Movistar'},
'517697651':{'en': 'Movistar'},
'517697652':{'en': 'Movistar'},
'517697653':{'en': 'Movistar'},
'517697654':{'en': 'Movistar'},
'517697655':{'en': 'Movistar'},
'517697656':{'en': 'Movistar'},
'517697657':{'en': 'Movistar'},
'517697658':{'en': 'Movistar'},
'51769766':{'en': 'Movistar'},
'51769767':{'en': 'Movistar'},
'51769768':{'en': 'Movistar'},
'517697692':{'en': 'Movistar'},
'517697693':{'en': 'Movistar'},
'517697694':{'en': 'Movistar'},
'517697695':{'en': 'Movistar'},
'517697696':{'en': 'Movistar'},
'517697697':{'en': 'Movistar'},
'517697698':{'en': 'Movistar'},
'517697699':{'en': 'Movistar'},
'518298230':{'en': 'Claro'},
'518298231':{'en': 'Claro'},
'518298232':{'en': 'Claro'},
'518298260':{'en': 'Movistar'},
'518298261':{'en': 'Movistar'},
'518298268':{'en': 'Movistar'},
'51829827':{'en': 'Claro'},
'518298298':{'en': 'Movistar'},
'51839836':{'en': 'Movistar'},
'51839837':{'en': 'Claro'},
'518398398':{'en': 'Movistar'},
'518398399':{'en': 'Movistar'},
'51849742':{'en': 'Claro'},
'518497430':{'en': 'Claro'},
'518497431':{'en': 'Claro'},
'518497435':{'en': 'Claro'},
'518497436':{'en': 'Claro'},
'518497437':{'en': 'Claro'},
'518497438':{'en': 'Claro'},
'518497439':{'en': 'Claro'},
'51849747':{'en': 'Claro'},
'518498400':{'en': 'Movistar'},
'518498401':{'en': 'Movistar'},
'518498402':{'en': 'Movistar'},
'518498403':{'en': 'Movistar'},
'518498404':{'en': 'Movistar'},
'518498405':{'en': 'Movistar'},
'518498406':{'en': 'Movistar'},
'51849841':{'en': 'Claro'},
'51849842':{'en': 'Claro'},
'51849843':{'en': 'Claro'},
'51849844':{'en': 'Claro'},
'51849845':{'en': 'Movistar'},
'51849846':{'en': 'Movistar'},
'51849847':{'en': 'Claro'},
'51849848':{'en': 'Movistar'},
'51849849':{'en': 'Movistar'},
'535':{'en': 'etecsa'},
'549113':{'en': 'Personal'},
'549114':{'en': 'Personal'},
'549115':{'en': 'Personal'},
'549116':{'en': 'Personal'},
'549220':{'en': 'Personal'},
'549221':{'en': 'Personal'},
'549222':{'en': 'Personal'},
'549223':{'en': 'Personal'},
'549224':{'en': 'Personal'},
'549225':{'en': 'Personal'},
'549226':{'en': 'Personal'},
'549227':{'en': 'Personal'},
'549228':{'en': 'Personal'},
'549229':{'en': 'Personal'},
'549230':{'en': 'Personal'},
'549231':{'en': 'Personal'},
'549232':{'en': 'Personal'},
'549233':{'en': 'Personal'},
'549234':{'en': 'Personal'},
'549235':{'en': 'Personal'},
'549236':{'en': 'Personal'},
'549239':{'en': 'Personal'},
'549247':{'en': 'Personal'},
'549249':{'en': 'Personal'},
'549260':{'en': 'Personal'},
'549261':{'en': 'Personal'},
'549262':{'en': 'Personal'},
'549263':{'en': 'Personal'},
'549264':{'en': 'Personal'},
'549265':{'en': 'Personal'},
'549266':{'en': 'Personal'},
'549280':{'en': 'Personal'},
'549290':{'en': 'Personal'},
'549291':{'en': 'Personal'},
'549292':{'en': 'Personal'},
'549293':{'en': 'Personal'},
'549294':{'en': 'Personal'},
'549295':{'en': 'Personal'},
'549296':{'en': 'Personal'},
'549297':{'en': 'Personal'},
'549298':{'en': 'Personal'},
'549299':{'en': 'Personal'},
'549332':{'en': 'Personal'},
'549336':{'en': 'Personal'},
'549338':{'en': 'Personal'},
'549340':{'en': 'Personal'},
'549341':{'en': 'Personal'},
'549342':{'en': 'Personal'},
'549343':{'en': 'Personal'},
'549344':{'en': 'Personal'},
'549345':{'en': 'Personal'},
'549346':{'en': 'Personal'},
'549347':{'en': 'Personal'},
'549348':{'en': 'Personal'},
'549349':{'en': 'Personal'},
'549351':{'en': 'Personal'},
'549352':{'en': 'Personal'},
'549353':{'en': 'Personal'},
'549354':{'en': 'Personal'},
'549356':{'en': 'Personal'},
'549357':{'en': 'Personal'},
'549358':{'en': 'Personal'},
'549362':{'en': 'Personal'},
'549364':{'en': 'Personal'},
'549370':{'en': 'Personal'},
'549371':{'en': 'Personal'},
'549372':{'en': 'Personal'},
'549373':{'en': 'Personal'},
'549374':{'en': 'Personal'},
'549375':{'en': 'Personal'},
'549376':{'en': 'Personal'},
'549377':{'en': 'Personal'},
'549378':{'en': 'Personal'},
'549379':{'en': 'Personal'},
'549380':{'en': 'Personal'},
'549381':{'en': 'Personal'},
'549382':{'en': 'Personal'},
'549383':{'en': 'Personal'},
'549384':{'en': 'Personal'},
'549385':{'en': 'Personal'},
'549386':{'en': 'Personal'},
'549387':{'en': 'Personal'},
'549388':{'en': 'Personal'},
'549389':{'en': 'Personal'},
'551195472':{'en': 'Vivo'},
'551195473':{'en': 'Vivo'},
'551195474':{'en': 'Vivo'},
'551195769':{'en': 'Vivo'},
'55119577':{'en': 'Vivo'},
'551195780':{'en': 'Vivo'},
'551195781':{'en': 'Vivo'},
'551195782':{'en': 'Vivo'},
'551195783':{'en': 'Vivo'},
'551195784':{'en': 'Vivo'},
'551195785':{'en': 'Vivo'},
'551195786':{'en': 'Vivo'},
'551196057':{'en': 'Vivo'},
'551196058':{'en': 'Vivo'},
'551196059':{'en': 'Vivo'},
'551196060':{'en': 'Vivo'},
'551196168':{'en': 'Claro BR'},
'551196169':{'en': 'Claro BR'},
'55119617':{'en': 'Claro BR'},
'55119618':{'en': 'Vivo'},
'551196180':{'en': 'Claro BR'},
'551196181':{'en': 'Claro BR'},
'55119619':{'en': 'Vivo'},
'55119630':{'en': 'Claro BR'},
'55119631':{'en': 'Claro BR'},
'55119632':{'en': 'Claro BR'},
'55119633':{'en': 'Claro BR'},
'55119637':{'en': 'Vivo'},
'55119638':{'en': 'Vivo'},
'55119639':{'en': 'Vivo'},
'55119640':{'en': 'Vivo'},
'55119641':{'en': 'Vivo'},
'55119647':{'en': 'Vivo'},
'55119648':{'en': 'Vivo'},
'55119649':{'en': 'Vivo'},
'55119657':{'en': 'Claro BR'},
'55119658':{'en': 'Claro BR'},
'55119659':{'en': 'Claro BR'},
'55119660':{'en': 'Claro BR'},
'55119661':{'en': 'Claro BR'},
'55119662':{'en': 'Claro BR'},
'55119663':{'en': 'Claro BR'},
'55119664':{'en': 'Claro BR'},
'551196650':{'en': 'Claro BR'},
'55119684':{'en': 'Vivo'},
'55119685':{'en': 'Vivo'},
'551196860':{'en': 'Vivo'},
'551196861':{'en': 'Vivo'},
'551196862':{'en': 'Vivo'},
'551196863':{'en': 'Vivo'},
'551196864':{'en': 'Vivo'},
'551196865':{'en': 'Vivo'},
'551196866':{'en': 'Vivo'},
'55119690':{'en': 'Vivo'},
'55119691':{'en': 'Claro BR'},
'551196910':{'en': | |
src_u.time
nc.variables[varn].coordinates = \
str(dimens.reverse())
nc.variables[varn].field = src_u.field
print('Creating boundary variables for '+vvar)
for sid in sides:
varn = vvar_out+str(sid)
print('Creating variable', varn)
dimens = list(dimens_v)
for dim in dimens:
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = vvar_out + \
' ' + int[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_v.units
except:
print(varn+' has no units')
nc.variables[varn].time = src_v.time
nc.variables[varn].coordinates = \
str(dimens.reverse())
nc.variables[varn].field = src_v.field
# get the right remap weights file
if rotate_part:
Cpos_u = 'rho'
Cpos_v = 'rho'
else:
Cpos_u = 'u'
Cpos_v = 'v'
# vertical interpolation from sigma to standard z level
# irange
if irange is None:
ssrange = (0,src_u.shape[-1])
else:
ssrange = irange
# jrange
if jrange is None:
jjrange = (0,src_u.shape[-2])
else:
jjrange = jrange
ndim = len(src_v.dimensions)-1
if ndim == 3:
print('vertical interpolation from sigma to standard z level')
src_uz = pycnal.remapping.sta2z( \
src_u[nt,:,ssrange[0]:ssrange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_u, spval=spval, \
srange=ssrange)
else:
src_uz = src_u[nt,ssrange[0]:ssrange[1]]
# srange
if srange is None:
ssrange = (0,src_v.shape[-1])
else:
ssrange = srange
if ndim == 3:
src_vz = pycnal.remapping.sta2z( \
src_v[nt,:,ssrange[0]:ssrange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_v, spval=spval, \
srange=ssrange)
else:
src_vz = src_v[nt,ssrange[0]:ssrange[1]]
src_vz = pycnal.remapping.flood2d(src_vz, srcgrdz, Cpos=Cpos_v, \
srange=ssrange, spval=spval, \
dmax=dmax)
# horizontal interpolation using scrip weights
Mp, Lp = dst_grd.hgrid.mask_rho.shape
if ndim == 3:
# vertical interpolation from standard z level to sigma
print('vertical interpolation from standard z level to sigma')
dst_u_north = pycnal.remapping.z2roms(dst_uz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pycnal.remapping.z2roms(dst_uz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pycnal.remapping.z2roms(dst_uz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pycnal.remapping.z2roms(dst_uz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pycnal.remapping.z2roms(dst_vz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pycnal.remapping.z2roms(dst_vz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pycnal.remapping.z2roms(dst_vz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pycnal.remapping.z2roms(dst_vz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
else:
dst_u_north = dst_uz[Mp-2:Mp, 0:Lp]
dst_u_south = dst_uz[0:2, 0:Lp]
dst_u_east = dst_uz[0:Mp, Lp-2:Lp]
dst_u_west = dst_uz[0:Mp, 0:2]
dst_v_north = dst_vz[Mp-2:Mp, 0:Lp]
dst_v_south = dst_vz[0:2, 0:Lp]
dst_v_east = dst_vz[0:Mp, Lp-2:Lp]
dst_v_west = dst_vz[0:Mp, 0:2]
# rotate u,v fields from the source grid orientation to the destination
# grid orientation (rotation by the angle difference, done in complex form).
if rotate_part:
    # velocities were remapped on destination rho points already, so no
    # additional source-grid rotation is needed: use a zero source angle.
    src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
else:
    # BUG FIX: original assigned this slice to 'src_ang', leaving
    # 'src_angle' undefined and raising NameError in 'angle = ...' below.
    src_angle = srcgrd.hgrid.angle_rho[ssrange[0]:ssrange[1]]
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
if ndim == 3:
    # replicate the 2D angle field over the N vertical levels
    angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
    # complex representation U = u + i*v; rotation is multiplication
    # by exp(-i*theta) on each two-row/two-column boundary strip
    U_north = dst_u_north + dst_v_north*1j
    eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
    U_south = dst_u_south + dst_v_south*1j
    eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
    U_east = dst_u_east + dst_v_east*1j
    eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
    U_west = dst_u_west + dst_v_west*1j
    eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
else:
    U_north = dst_u_north + dst_v_north*1j
    eitheta_north = np.exp(-1j*angle[Mp-2:Mp, 0:Lp])
    U_south = dst_u_south + dst_v_south*1j
    eitheta_south = np.exp(-1j*angle[0:2, 0:Lp])
    U_east = dst_u_east + dst_v_east*1j
    eitheta_east = np.exp(-1j*angle[0:Mp, Lp-2:Lp])
    U_west = dst_u_west + dst_v_west*1j
    eitheta_west = np.exp(-1j*angle[0:Mp, 0:2])
# apply the rotation and split back into u (real part) and v (imaginary part)
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
# BUG FIX: original line redundantly re-assigned dst_v_east from U_east,
# leaving dst_v_west holding the un-rotated values; take imag(U_west).
dst_v_west = np.imag(U_west)
# move back to u,v points: the rotated fields live on rho points, so
# average adjacent rho values onto the staggered Arakawa-C locations
# (xi-average for u, eta-average for v) and squeeze out the boundary axis.
if ndim == 3:
    # 3D case: leading axis is the vertical level
    dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + \
                      dst_u_north[:,-1,1:])
    dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + \
                      dst_v_north[:,1:,:])
    dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + \
                      dst_u_south[:,0,1:])
    dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + \
                      dst_v_south[:,1:,:])
    dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + \
                      dst_u_east[:,:,1:])
    dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + \
                      dst_v_east[:,1:,-1])
    dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + \
                      dst_u_west[:,:,1:])
    dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + \
                      dst_v_west[:,1:,0])
else:
    # 2D case: same averaging without the vertical axis
    dst_u_north = 0.5 * np.squeeze(dst_u_north[-1,:-1] + \
                      dst_u_north[-1,1:])
    dst_v_north = 0.5 * np.squeeze(dst_v_north[:-1,:] + \
                      dst_v_north[1:,:])
    dst_u_south = 0.5 * np.squeeze(dst_u_south[0,:-1] + \
                      dst_u_south[0,1:])
    dst_v_south = 0.5 * np.squeeze(dst_v_south[:-1,:] + \
                      dst_v_south[1:,:])
    dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:-1] + \
                      dst_u_east[:,1:])
    dst_v_east = 0.5 * np.squeeze(dst_v_east[:-1,-1] + \
                      dst_v_east[1:,-1])
    dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:-1] + \
                      dst_u_west[:,1:])
    dst_v_west = 0.5 * np.squeeze(dst_v_west[:-1,0] + \
                      dst_v_west[1:,0])
# spval: blank out land points on each open boundary by writing the fill
# value wherever the destination u/v land/sea mask is 0.
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
if ndim == 3:
    # 3D: apply the same horizontal mask at every vertical level
    for n in range(dst_grd.vgrid.N):
        dst_u_north[n, idxu_north[0]] = spval
        dst_v_north[n, idxv_north[0]] = spval
        dst_u_south[n, idxu_south[0]] = spval
        dst_v_south[n, idxv_south[0]] = spval
        dst_u_east[n, idxu_east[0]] = spval
        dst_v_east[n, idxv_east[0]] = spval
        dst_u_west[n, idxu_west[0]] = spval
        dst_v_west[n, idxv_west[0]] = spval
else:
    dst_u_north[idxu_north[0]] = spval
    dst_v_north[idxv_north[0]] = spval
    dst_u_south[idxu_south[0]] = spval
    dst_v_south[idxv_south[0]] = spval
    dst_u_east[idxu_east[0]] = spval
    dst_v_east[idxv_east[0]] = spval
    dst_u_west[idxu_west[0]] = spval
    dst_v_west[idxv_west[0]] = spval
# write data in destination file: store this time record (nctidx) of each
# boundary slice into its per-side u/v netCDF variable.
print('write data in destination file')
sid = '_west'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_west
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_west
sid = '_north'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_north
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_north
sid = '_east'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_east
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_east
sid = '_south'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_south
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_south
if compute_ubar:
    if nctidx == 0:
        # First record only: create the 2D depth-averaged velocity boundary
        # variables. Attribute layout mirrors the 3D u/v variables above.
        print('Creating variable ubar_north')
        nc.createVariable('ubar_north', 'f8', \
                          ('ocean_time', 'xi_u'), fill_value=spval)
        nc.variables['ubar_north'].long_name = \
            '2D u-momentum north boundary condition'
        nc.variables['ubar_north'].units = 'meter second-1'
        nc.variables['ubar_north'].time = 'ocean_time'
        nc.variables['ubar_north'].coordinates = 'xi_u ocean_time'
        nc.variables['ubar_north'].field = 'ubar_north, scalar, series'
        print('Creating variable vbar_north')
        nc.createVariable('vbar_north', 'f8', \
                          ('ocean_time', 'xi_v'), fill_value=spval)
        nc.variables['vbar_north'].long_name = \
            '2D v-momentum north boundary condition'
        nc.variables['vbar_north'].units = 'meter second-1'
        nc.variables['vbar_north'].time = 'ocean_time'
        nc.variables['vbar_north'].coordinates = 'xi_v ocean_time'
        # BUG FIX: was 'vbar_north,, scalar, series' (stray double comma)
        nc.variables['vbar_north'].field = 'vbar_north, scalar, series'
        print('Creating variable ubar_south')
        nc.createVariable('ubar_south', 'f8', \
                          ('ocean_time', 'xi_u'), fill_value=spval)
        nc.variables['ubar_south'].long_name = \
            '2D u-momentum south boundary condition'
        nc.variables['ubar_south'].units = 'meter second-1'
        nc.variables['ubar_south'].time = 'ocean_time'
        nc.variables['ubar_south'].coordinates = 'xi_u ocean_time'
        nc.variables['ubar_south'].field = 'ubar_south, scalar, series'
        print('Creating variable vbar_south')
        nc.createVariable('vbar_south', 'f8', \
                          ('ocean_time', 'xi_v'), fill_value=spval)
        nc.variables['vbar_south'].long_name = \
            '2D v-momentum south boundary condition'
        nc.variables['vbar_south'].units = 'meter second-1'
        nc.variables['vbar_south'].time = 'ocean_time'
        nc.variables['vbar_south'].coordinates = 'xi_v ocean_time'
        # consistency fix: 'field' attribute was missing for vbar_south
        nc.variables['vbar_south'].field = 'vbar_south, scalar, series'
        print('Creating variable ubar_west')
        nc.createVariable('ubar_west', 'f8', \
                          ('ocean_time', 'eta_u'), fill_value=spval)
        nc.variables['ubar_west'].long_name = \
            '2D u-momentum west boundary condition'
        nc.variables['ubar_west'].units = 'meter second-1'
        nc.variables['ubar_west'].time = 'ocean_time'
        nc.variables['ubar_west'].coordinates = 'eta_u ocean_time'
        nc.variables['ubar_west'].field = 'ubar_west, scalar, series'
        print('Creating variable vbar_west')
        nc.createVariable('vbar_west', 'f8', \
                          ('ocean_time', 'eta_v'), fill_value=spval)
        nc.variables['vbar_west'].long_name = \
            '2D v-momentum west boundary condition'
        nc.variables['vbar_west'].units = 'meter second-1'
        nc.variables['vbar_west'].time = 'ocean_time'
        nc.variables['vbar_west'].coordinates = 'eta_v ocean_time'
        # consistency fix: 'field' attribute was missing for vbar_west
        nc.variables['vbar_west'].field = 'vbar_west, scalar, series'
        print('Creating variable ubar_east')
        nc.createVariable('ubar_east', 'f8', \
                          ('ocean_time', 'eta_u'), fill_value=spval)
        nc.variables['ubar_east'].long_name = \
            '2D u-momentum east boundary condition'
        nc.variables['ubar_east'].units = 'meter second-1'
        nc.variables['ubar_east'].time = 'ocean_time'
        nc.variables['ubar_east'].coordinates = 'eta_u ocean_time'
        nc.variables['ubar_east'].field = 'ubar_east, scalar, series'
        print('Creating variable vbar_east')
        nc.createVariable('vbar_east', 'f8', \
                          ('ocean_time', 'eta_v'), fill_value=spval)
        nc.variables['vbar_east'].long_name = \
            '2D v-momentum east boundary condition'
        nc.variables['vbar_east'].units = 'meter second-1'
        nc.variables['vbar_east'].time = 'ocean_time'
        nc.variables['vbar_east'].coordinates = 'eta_v ocean_time'
        # consistency fix: 'field' attribute was missing for vbar_east
        nc.variables['vbar_east'].field = 'vbar_east, scalar, series'
# compute depth average velocity ubar and vbar
# get z at the right position
print('Computing ubar/vbar from u/v')
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] +
dst_grd.vgrid.z_w[0,:,-1, 1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] +
dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] +
dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] +
dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] +
dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] +
dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] +
dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] +
dst_grd.vgrid.z_w[0,:,1:,0])
if not rotate_uv:
dst_u_north = np.squeeze(dst_u_north)
dst_v_north = np.squeeze(dst_v_north)
dst_u_south = np.squeeze(dst_u_south)
dst_v_south = np.squeeze(dst_v_south)
dst_u_east = np.squeeze(dst_u_east)
dst_v_east = np.squeeze(dst_v_east)
dst_u_west = np.squeeze(dst_u_west)
dst_v_west = np.squeeze(dst_v_west)
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
# print 'Shapes 3', dst_u_north.shape, dst_ubar_north.shape, z_u_north.shape, np.diff(z_u_north[:,1]).shape
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * \
np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * \
np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * \
np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * \
np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * \
| |
IntPtr[] processIds,
uint cb,
[MarshalAs(UnmanagedType.U4)]
out uint pBytesReturned);
// P/Invoke declarations for the Win32 process/token APIs used by this helper.

// Opens a handle to an existing local process (kernel32).
[DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr OpenProcess(
    ProcessAccessFlags processAccess,
    bool bInheritHandle,
    IntPtr processId);

// Opens the access token associated with a process (advapi32).
[DllImport("advapi32.dll", SetLastError = true)]
private static extern bool OpenProcessToken(
    IntPtr ProcessHandle,
    TokenAccessLevels DesiredAccess,
    out IntPtr TokenHandle);

// Converts a binary SID into its string form (e.g. "S-1-5-18").
[DllImport("advapi32.dll", SetLastError = true)]
private static extern bool ConvertSidToStringSidW(
    IntPtr pSID,
    [MarshalAs(UnmanagedType.LPTStr)]
    out string StringSid);

// Duplicates an access token, optionally changing its type/impersonation level.
[DllImport("advapi32", SetLastError = true)]
private static extern bool DuplicateTokenEx(
    IntPtr hExistingToken,
    TokenAccessLevels dwDesiredAccess,
    IntPtr lpTokenAttributes,
    SECURITY_IMPERSONATION_LEVEL ImpersonationLevel,
    TOKEN_TYPE TokenType,
    out IntPtr phNewToken);

// Makes the calling thread impersonate the user represented by hToken.
[DllImport("advapi32.dll", SetLastError = true)]
private static extern bool ImpersonateLoggedOnUser(
    IntPtr hToken);

// Ends impersonation, restoring the thread's original security context.
[DllImport("advapi32.dll", SetLastError = true)]
private static extern bool RevertToSelf();
// Runs lpCommandLine as another user and returns its stdout, stderr and exit code.
// Flow: resolve the target account SID, create inheritable stdout/stderr/stdin
// pipes, obtain one or more candidate logon tokens, launch the process with the
// first token that works, feed stdin, then collect the output and exit code.
// Throws Win32Exception on pipe setup or process launch failure.
public static CommandResult RunAsUser(string username, string password, string lpCommandLine,
    string lpCurrentDirectory, string stdinInput, LogonFlags logonFlags, LogonType logonType)
{
    SecurityIdentifier account = null;
    if (logonType != LogonType.LOGON32_LOGON_NEW_CREDENTIALS)
    {
        // NEW_CREDENTIALS logons don't need a resolvable local account,
        // so the SID lookup is only done for the other logon types.
        account = GetBecomeSid(username);
    }
    STARTUPINFOEX si = new STARTUPINFOEX();
    si.startupInfo.dwFlags = (int)StartupInfoFlags.USESTDHANDLES;
    // Pipe handles must be inheritable so the child process can use them.
    SECURITY_ATTRIBUTES pipesec = new SECURITY_ATTRIBUTES();
    pipesec.bInheritHandle = true;
    // Create the stdout, stderr and stdin pipes used in the process and add to the startupInfo.
    // The parent-side end of each pipe is marked non-inheritable so only the
    // child-facing handle is passed down.
    SafeFileHandle stdout_read, stdout_write, stderr_read, stderr_write, stdin_read, stdin_write;
    if (!CreatePipe(out stdout_read, out stdout_write, pipesec, 0))
        throw new Win32Exception("STDOUT pipe setup failed");
    if (!SetHandleInformation(stdout_read, HandleFlags.INHERIT, 0))
        throw new Win32Exception("STDOUT pipe handle setup failed");
    if (!CreatePipe(out stderr_read, out stderr_write, pipesec, 0))
        throw new Win32Exception("STDERR pipe setup failed");
    if (!SetHandleInformation(stderr_read, HandleFlags.INHERIT, 0))
        throw new Win32Exception("STDERR pipe handle setup failed");
    if (!CreatePipe(out stdin_read, out stdin_write, pipesec, 0))
        throw new Win32Exception("STDIN pipe setup failed");
    if (!SetHandleInformation(stdin_write, HandleFlags.INHERIT, 0))
        throw new Win32Exception("STDIN pipe handle setup failed");
    si.startupInfo.hStdOutput = stdout_write;
    si.startupInfo.hStdError = stderr_write;
    si.startupInfo.hStdInput = stdin_read;
    // Setup the stdin buffer (UTF-8 without BOM)
    UTF8Encoding utf8_encoding = new UTF8Encoding(false);
    FileStream stdin_fs = new FileStream(stdin_write, FileAccess.Write, 32768);
    StreamWriter stdin = new StreamWriter(stdin_fs, utf8_encoding, 32768);
    // Create the environment block if set
    IntPtr lpEnvironment = IntPtr.Zero;
    CreationFlags startup_flags = CreationFlags.CREATE_UNICODE_ENVIRONMENT;
    PROCESS_INFORMATION pi = new PROCESS_INFORMATION();
    // Get the user tokens to try running processes with, ordered from most
    // to least privileged; try each until one launches successfully.
    List<IntPtr> tokens = GetUserTokens(account, username, password, logonType);
    bool launch_success = false;
    foreach (IntPtr token in tokens)
    {
        if (CreateProcessWithTokenW(
            token,
            logonFlags,
            null,
            new StringBuilder(lpCommandLine),
            startup_flags,
            lpEnvironment,
            lpCurrentDirectory,
            si,
            out pi))
        {
            launch_success = true;
            break;
        }
    }
    if (!launch_success)
        throw new Win32Exception("Failed to start become process");
    CommandResult result = new CommandResult();
    // Setup the output buffers and get stdout/stderr.
    // The parent's copies of the write ends are closed so the readers see
    // EOF once the child exits.
    FileStream stdout_fs = new FileStream(stdout_read, FileAccess.Read, 4096);
    StreamReader stdout = new StreamReader(stdout_fs, utf8_encoding, true, 4096);
    stdout_write.Close();
    FileStream stderr_fs = new FileStream(stderr_read, FileAccess.Read, 4096);
    StreamReader stderr = new StreamReader(stderr_fs, utf8_encoding, true, 4096);
    stderr_write.Close();
    // Feed the input and close stdin so the child doesn't block waiting for more.
    stdin.WriteLine(stdinInput);
    stdin.Close();
    // NOTE(review): stdout_str is definitely assigned by GetProcessOutput's
    // out parameter before use.
    string stdout_str, stderr_str = null;
    GetProcessOutput(stdout, stderr, out stdout_str, out stderr_str);
    UInt32 rc = GetProcessExitCode(pi.hProcess);
    result.StandardOut = stdout_str;
    result.StandardError = stderr_str;
    result.ExitCode = rc;
    return result;
}
// Translate an account name into its security identifier (SID).
// Throws a plain Exception with a descriptive message when the name
// cannot be mapped to any known account.
private static SecurityIdentifier GetBecomeSid(string username)
{
    NTAccount ntAccount = new NTAccount(username);
    try
    {
        return (SecurityIdentifier)ntAccount.Translate(typeof(SecurityIdentifier));
    }
    catch (IdentityNotMappedException ex)
    {
        throw new Exception(String.Format("Unable to find become user {0}: {1}", username, ex.Message));
    }
}
// Builds an ordered list of candidate primary tokens for the requested user,
// most-privileged first. Well-known service accounts (SYSTEM / LocalService /
// NetworkService) require duplicating a SYSTEM token; normal accounts go
// through LogonUser, trying the linked elevated token first and keeping the
// plain logon token as a fallback.
private static List<IntPtr> GetUserTokens(SecurityIdentifier account, string username, string password, LogonType logonType)
{
    List<IntPtr> tokens = new List<IntPtr>();
    List<String> service_sids = new List<String>()
    {
        "S-1-5-18", // NT AUTHORITY\SYSTEM
        "S-1-5-19", // NT AUTHORITY\LocalService
        "S-1-5-20" // NT AUTHORITY\NetworkService
    };
    IntPtr hSystemToken = IntPtr.Zero;
    string account_sid = "";
    if (logonType != LogonType.LOGON32_LOGON_NEW_CREDENTIALS)
    {
        GrantAccessToWindowStationAndDesktop(account);
        // Try to get SYSTEM token handle so we can impersonate to get full admin token
        hSystemToken = GetSystemUserHandle();
        account_sid = account.ToString();
    }
    bool impersonated = false;
    try
    {
        IntPtr hSystemTokenDup = IntPtr.Zero;
        if (hSystemToken == IntPtr.Zero && service_sids.Contains(account_sid))
        {
            // We need the SYSTEM token if we want to become one of those accounts, fail here
            throw new Win32Exception("Failed to get token for NT AUTHORITY\\SYSTEM");
        }
        else if (hSystemToken != IntPtr.Zero)
        {
            // We have the token, need to duplicate and impersonate
            bool dupResult = DuplicateTokenEx(
                hSystemToken,
                TokenAccessLevels.MaximumAllowed,
                IntPtr.Zero,
                SECURITY_IMPERSONATION_LEVEL.SecurityImpersonation,
                TOKEN_TYPE.TokenPrimary,
                out hSystemTokenDup);
            // Capture the error code before CloseHandle can overwrite it.
            int lastError = Marshal.GetLastWin32Error();
            CloseHandle(hSystemToken);
            if (!dupResult && service_sids.Contains(account_sid))
                throw new Win32Exception(lastError, "Failed to duplicate token for NT AUTHORITY\\SYSTEM");
            else if (dupResult && account_sid != "S-1-5-18")
            {
                if (ImpersonateLoggedOnUser(hSystemTokenDup))
                    impersonated = true;
                else if (service_sids.Contains(account_sid))
                    throw new Win32Exception("Failed to impersonate as SYSTEM account");
            }
            // If SYSTEM impersonation failed but we're trying to become a regular user, just proceed;
            // might get a limited token in UAC-enabled cases, but better than nothing...
        }
        string domain = null;
        if (service_sids.Contains(account_sid))
        {
            // We're using a well-known service account, do a service logon instead of the actual flag set
            logonType = LogonType.LOGON32_LOGON_SERVICE;
            domain = "NT AUTHORITY";
            // BUG FIX: the checked-in source contained the literal placeholder
            // "<PASSWORD>" here (not valid C#); a service logon takes no
            // password, so pass null.
            password = null;
            switch (account_sid)
            {
                case "S-1-5-18":
                    // Already holding a duplicated SYSTEM primary token - use it directly.
                    tokens.Add(hSystemTokenDup);
                    return tokens;
                case "S-1-5-19":
                    username = "LocalService";
                    break;
                case "S-1-5-20":
                    username = "NetworkService";
                    break;
            }
        }
        else
        {
            // We are trying to become a local or domain account
            if (username.Contains(@"\"))
            {
                // DOMAIN\user form: split into the two LogonUser arguments
                var user_split = username.Split(Convert.ToChar(@"\"));
                domain = user_split[0];
                username = user_split[1];
            }
            else if (username.Contains("@"))
                domain = null; // UPN form (user@domain): LogonUser expects a null domain
            else
                domain = "."; // bare username: log on against the local machine
        }
        IntPtr hToken = IntPtr.Zero;
        if (!LogonUser(
            username,
            domain,
            password,
            logonType,
            LogonProvider.LOGON32_PROVIDER_DEFAULT,
            out hToken))
        {
            throw new Win32Exception("LogonUser failed");
        }
        if (!service_sids.Contains(account_sid))
        {
            // Try and get the elevated token for local/domain account
            IntPtr hTokenElevated = GetElevatedToken(hToken);
            tokens.Add(hTokenElevated);
        }
        // add the original token as a fallback
        tokens.Add(hToken);
    }
    finally
    {
        if (impersonated)
            RevertToSelf();
    }
    return tokens;
}
// Scans all running processes looking for one owned by NT AUTHORITY\SYSTEM
// (SID S-1-5-18) and returns a handle to its access token, or IntPtr.Zero
// when none can be opened. The returned token has the access rights needed
// for CreateProcessWithTokenW plus TOKEN_IMPERSONATE.
private static IntPtr GetSystemUserHandle()
{
    uint array_byte_size = 1024 * sizeof(uint);
    IntPtr[] pids = new IntPtr[1024];
    uint bytes_copied;
    if (!EnumProcesses(pids, array_byte_size, out bytes_copied))
    {
        throw new Win32Exception("Failed to enumerate processes");
    }
    // TODO: Handle if bytes_copied is larger than the array size and rerun EnumProcesses with larger array
    uint num_processes = bytes_copied / sizeof(uint);
    for (uint i = 0; i < num_processes; i++)
    {
        // Processes we lack access to simply return a null handle and are skipped.
        IntPtr hProcess = OpenProcess(ProcessAccessFlags.PROCESS_QUERY_INFORMATION, false, pids[i]);
        if (hProcess != IntPtr.Zero)
        {
            IntPtr hToken = IntPtr.Zero;
            // According to CreateProcessWithTokenW we require a token with
            // TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
            // Also add in TOKEN_IMPERSONATE so we can get an impersontated token
            TokenAccessLevels desired_access = TokenAccessLevels.Query |
                TokenAccessLevels.Duplicate |
                TokenAccessLevels.AssignPrimary |
                TokenAccessLevels.Impersonate;
            if (OpenProcessToken(hProcess, desired_access, out hToken))
            {
                string sid = GetTokenUserSID(hToken);
                if (sid == "S-1-5-18")
                {
                    // Found a SYSTEM process: keep its token, release the process handle.
                    CloseHandle(hProcess);
                    return hToken;
                }
            }
            CloseHandle(hToken);
        }
        CloseHandle(hProcess);
    }
    // No SYSTEM-owned process token could be obtained.
    return IntPtr.Zero;
}
// Returns the string SID (e.g. "S-1-5-18") of the user that owns the given
// access token. Uses the usual two-call GetTokenInformation pattern: first
// query the required buffer size, then fetch the TOKEN_USER structure.
private static string GetTokenUserSID(IntPtr hToken)
{
    uint token_length;
    string sid;
    if (!GetTokenInformation(hToken, TokenInformationClass.TokenUser, IntPtr.Zero, 0, out token_length))
    {
        int last_err = Marshal.GetLastWin32Error();
        // The sizing call is expected to "fail" with ERROR_INSUFFICIENT_BUFFER;
        // anything else is a real error.
        if (last_err != 122) // ERROR_INSUFFICIENT_BUFFER
            throw new Win32Exception(last_err, "Failed to get TokenUser length");
    }
    IntPtr token_information = Marshal.AllocHGlobal((int)token_length);
    try
    {
        if (!GetTokenInformation(hToken, TokenInformationClass.TokenUser, token_information, token_length, out token_length))
            throw new Win32Exception("Failed to get TokenUser information");
        TOKEN_USER token_user = (TOKEN_USER)Marshal.PtrToStructure(token_information, typeof(TOKEN_USER));
        if (!ConvertSidToStringSidW(token_user.User.Sid, out sid))
            throw new Win32Exception("Failed to get user SID");
    }
    finally
    {
        // Always release the unmanaged buffer, even if a query failed.
        Marshal.FreeHGlobal(token_information);
    }
    return sid;
}
private static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
{
var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
string so = null, se = null;
ThreadPool.QueueUserWorkItem((s) =>
{
so = stdoutStream.ReadToEnd();
sowait.Set();
});
ThreadPool.QueueUserWorkItem((s) =>
{
se = stderrStream.ReadToEnd();
sewait.Set();
});
foreach (var wh in new WaitHandle[] { sowait, sewait })
wh.WaitOne();
stdout = so;
stderr = se;
}
private static uint GetProcessExitCode(IntPtr processHandle)
{
new NativeWaitHandle(processHandle).WaitOne();
uint exitCode;
if (!GetExitCodeProcess(processHandle, out exitCode))
throw new Win32Exception("Error getting process exit code");
return exitCode;
}
private static IntPtr GetElevatedToken(IntPtr hToken)
{
uint requestedLength;
IntPtr pTokenInfo = Marshal.AllocHGlobal(sizeof(int));
try
{
if (!GetTokenInformation(hToken, TokenInformationClass.TokenElevationType, pTokenInfo, sizeof(int), out requestedLength))
throw new Win32Exception("Unable to get TokenElevationType");
var tet = (TokenElevationType)Marshal.ReadInt32(pTokenInfo);
// we already have the best token we can get, just use it
if (tet != TokenElevationType.TokenElevationTypeLimited)
return hToken;
GetTokenInformation(hToken, TokenInformationClass.TokenLinkedToken, IntPtr.Zero, 0, out requestedLength);
IntPtr pLinkedToken = Marshal.AllocHGlobal((int)requestedLength);
if (!GetTokenInformation(hToken, TokenInformationClass.TokenLinkedToken, pLinkedToken, requestedLength, out requestedLength))
throw new Win32Exception("Unable to get linked token");
IntPtr linkedToken = Marshal.ReadIntPtr(pLinkedToken);
Marshal.FreeHGlobal(pLinkedToken);
return linkedToken;
}
finally
{
Marshal.FreeHGlobal(pTokenInfo);
}
}
private static void GrantAccessToWindowStationAndDesktop(SecurityIdentifier account)
{
const int WindowStationAllAccess = 0x000f037f;
GrantAccess(account, GetProcessWindowStation(), WindowStationAllAccess);
const int DesktopRightsAllAccess = 0x000f01ff;
GrantAccess(account, GetThreadDesktop(GetCurrentThreadId()), DesktopRightsAllAccess);
}
private static void GrantAccess(SecurityIdentifier account, IntPtr handle, int accessMask)
{
SafeHandle safeHandle = new NoopSafeHandle(handle);
GenericSecurity security =
new GenericSecurity(false, ResourceType.WindowObject, safeHandle, AccessControlSections.Access);
security.AddAccessRule(
new GenericAccessRule(account, accessMask, AccessControlType.Allow));
security.Persist(safeHandle, | |
<reponame>GG-yuki/bugs<filename>python/Nogo/3_try/all.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
human VS AI models
Input your move in the format: 2,3
@author: <NAME>
"""
import copy
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class DisjointNode(object):
    """One node of the union-find forest used to track stone groups."""

    def __init__(self, parent: int = -1, belonging: int = -1) -> None:
        # -1 means "unset": no parent link yet / the point is empty.
        self.parent = parent        # index of this node's parent in the forest
        self.belonging = belonging  # id of the player occupying this point
class Block(object):
    """A connected group of same-coloured stones and its liberties."""

    def __init__(self, belonging: int, ki: set) -> None:
        self.belonging = belonging  # id of the owning player
        self.ki = ki                # set of liberty points ("qi")
class Board(object):
    """NoGo board.

    The position is stored sparsely in ``states`` (move index -> player id).
    Stone groups are tracked with a disjoint-set forest (``disjoint``) whose
    roots own ``Block`` records holding each group's liberties ("ki").
    Because NoGo forbids both capturing and suicide, each player keeps his
    own list of legal moves (``availables_1`` / ``availables_2``).
    """

    def __init__(self, **kwargs) -> None:
        self.width = int(kwargs.get('width', 9))    # board width, default 9
        self.height = int(kwargs.get('height', 9))  # board height, default 9
        # board state: key = move index, value = player id
        self.states = {}
        self.players = [1, 2]  # the two players

    def init_board(self, start_player: int = 0) -> None:
        """Reset all per-game state; ``start_player`` indexes ``self.players``."""
        self.current_player = self.players[start_player]  # who moves first
        # initially every point is a legal move for both players
        self.availables_1 = list(range(self.width * self.height))
        self.availables_2 = list(range(self.width * self.height))
        self.states = {}
        self.last_move = -1
        # one union-find node per point; parent == -1 marks an empty point
        self.disjoint = [DisjointNode(-1, -1) for _ in range(self.width * self.height)]
        self.blocks = {}  # group records keyed by each group's root move

    def move_to_location(self, move: int) -> list:
        """Convert a move index into a [row, col] pair."""
        h = move // self.width
        w = move % self.width
        return [h, w]

    def location_to_move(self, location: list) -> int:
        """Convert [row, col] into a move index; -1 when malformed or off-board."""
        if len(location) != 2:
            return -1
        h = location[0]
        w = location[1]
        m = h * self.width + w
        if m not in range(self.width * self.height):
            return -1
        return m

    def current_state(self) -> np.ndarray:
        """Return 4 x width x height planes from the current player's view.

        Plane 0: current player's stones, plane 1: opponent's stones,
        plane 2: the last move, plane 3: all ones when an even number of
        stones has been played. Rows are flipped before returning.
        """
        square_state = np.zeros((4, self.width, self.height))
        if self.states:
            moves, players = np.array(list(zip(*self.states.items())))
            move_curr = moves[players == self.current_player]
            move_oppo = moves[players != self.current_player]
            # NOTE(review): the column index uses ``% self.height``; correct
            # only on square boards -- confirm if width != height is ever used.
            square_state[0][move_curr // self.width, move_curr % self.height] = 1.0
            square_state[1][move_oppo // self.width, move_oppo % self.height] = 1.0
            square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0
        if len(self.states) % 2 == 0:
            square_state[3][:, :] = 1.0
        return square_state[:, ::-1, :]

    def do_move(self, move: int) -> None:
        """Play ``move`` for the current player and hand the turn over."""
        self.states[move] = self.current_player
        if move in self.availables_1:
            self.availables_1.remove(move)  # occupied points are never legal
        if move in self.availables_2:
            self.availables_2.remove(move)
        self.maintain_blocks(move)      # merge/update stone groups
        self.refresh_availables()       # recompute both players' legal moves
        self.current_player = self.get_current_opponent()  # switch turns
        self.last_move = move

    def maintain_blocks(self, move: int) -> None:
        """Update the union-find forest and group liberties after ``move``."""
        self.disjoint[move].parent = move  # the new stone roots its own group
        self.disjoint[move].belonging = self.current_player
        up, down, left, right = self.up_down_left_right(move)
        udlr = {up, down, left, right} - {-1}  # on-board neighbours only
        blanks = set()  # empty neighbours are the new stone's liberties
        for u in udlr:
            if self.disjoint[u].parent == -1:
                blanks.add(u)
        self.blocks[move] = Block(self.current_player, blanks)
        for u in udlr:
            # adjacent enemy groups lose this point as a liberty
            if self.disjoint[u].belonging == self.get_current_opponent():
                that_ancestor = self.get_ancestor(u)
                self.blocks[that_ancestor].ki -= {move}
        for u in udlr:
            # adjacent friendly groups merge into the new stone's group
            if self.disjoint[u].belonging == self.current_player:
                that_ancestor = self.get_ancestor(u)
                if that_ancestor != move:
                    self.blocks[that_ancestor].ki -= {move}
                    self.disjoint[that_ancestor].parent = move
                    self.blocks[move].ki = self.blocks[move].ki | self.blocks[that_ancestor].ki
                    self.blocks.pop(that_ancestor)
        self.blocks[move].ki = self.blocks[move].ki - {move}

    def refresh_availables(self) -> None:
        """Drop now-illegal points from each player's legal-move list.

        A point is illegal for a player when playing it would capture an
        adjacent enemy group (NoGo forbids capture) or when it is fully
        surrounded by enemy stones.
        """
        # BUGFIX: iterate over snapshots -- the original removed items from
        # the very list it was looping over, silently skipping candidates.
        for a in list(self.availables_1):
            up, down, left, right = self.up_down_left_right(a)
            udlr = {up, down, left, right} - {-1}
            lib_place = 0   # count of empty or friendly neighbours
            test_ki = set()
            for u in udlr:
                if self.disjoint[u].belonging == -1:  # empty neighbour
                    test_ki = test_ki | {u}
                    lib_place += 1
                elif self.disjoint[u].belonging == self.players[1]:  # enemy stone
                    that_ancestor = self.get_ancestor(u)
                    if len(self.blocks[that_ancestor].ki) < 2:
                        # playing here would take the group's last liberty
                        if a in self.availables_1:
                            self.availables_1.remove(a)
                        break
                else:  # own stone
                    lib_place += 1
            if lib_place < 1:  # surrounded entirely by enemy stones
                if a in self.availables_1:
                    self.availables_1.remove(a)
                # NOTE(review): with lib_place < 1 there are no friendly
                # neighbours, so this suicide check never fires; it may have
                # been intended one indentation level out. Kept as-is to
                # preserve behaviour.
                for u in udlr:
                    if self.disjoint[u].belonging == self.players[0]:
                        that_ancestor = self.get_ancestor(u)
                        test_ki = test_ki | self.blocks[that_ancestor].ki - {a}
                if len(test_ki) < 1:
                    if a in self.availables_1:
                        self.availables_1.remove(a)
        for b in list(self.availables_2):
            up, down, left, right = self.up_down_left_right(b)
            udlr = {up, down, left, right} - {-1}
            lib_place = 0
            test_ki = set()
            for u in udlr:
                if self.disjoint[u].belonging == -1:  # empty neighbour
                    test_ki = test_ki | {u}
                    lib_place += 1
                elif self.disjoint[u].belonging == self.players[0]:  # enemy stone
                    that_ancestor = self.get_ancestor(u)
                    if len(self.blocks[that_ancestor].ki) < 2:
                        if b in self.availables_2:
                            self.availables_2.remove(b)
                        break
                else:  # own stone
                    lib_place += 1
            if lib_place < 1:
                if b in self.availables_2:
                    self.availables_2.remove(b)
                for u in udlr:
                    if self.disjoint[u].belonging == self.players[1]:
                        that_ancestor = self.get_ancestor(u)
                        test_ki = test_ki | self.blocks[that_ancestor].ki - {b}
                if len(test_ki) < 1:
                    if b in self.availables_2:
                        self.availables_2.remove(b)

    def get_current_player(self) -> int:
        """Return the id of the player to move."""
        return self.current_player

    def get_current_opponent(self) -> int:
        """Return the id of the player not to move."""
        if self.current_player == self.players[0]:
            return self.players[1]
        else:
            return self.players[0]

    def up_down_left_right(self, point: int):
        """Return the four neighbours of ``point`` (-1 for off-board sides)."""
        if point % self.width != 0:
            left = point - 1
        else:
            left = -1
        if point % self.width != self.width - 1:
            right = point + 1
        else:
            right = -1
        if point < self.width * (self.width - 1):
            up = point + self.width
        else:
            up = -1
        if point >= self.width:
            down = point - self.width
        else:
            down = -1
        return up, down, left, right

    def get_current_availables(self) -> list:
        """Return the legal-move list of the player to move."""
        if self.current_player == self.players[0]:
            return self.availables_1
        else:
            return self.availables_2

    def get_ancestor(self, move: int) -> int:
        """Follow parent links to the root of ``move``'s group."""
        while True:
            if self.disjoint[move].parent == move:
                return move
            else:
                move = self.disjoint[move].parent
class Game(object):
    """Game server: drives a NoGo match over a JSON stdin protocol."""
    def __init__(self, board, **kwargs):
        self.board = board  # the Board this match is played on
    def start_play(self, player1, player2, start_player=0, is_shown=1):  # run the match
        """start a game between two players"""
        # One JSON request line, e.g. {"requests": [{"x": 2, "y": 3}]};
        # x/y is the opponent's previous move, or negative when there is none.
        line = input().strip()
        full_input = json.loads(line)
        requests = full_input['requests']
        x = requests[0]['x']
        y = requests[0]['y']
        if x < 0:
            start_player = 1
        else:
            start_player = 0
        self.board.init_board(start_player)  # reset the board
        p1, p2 = self.board.players  # the two player ids
        player1.set_player_ind(p1)
        player2.set_player_ind(p2)
        players = {p1: player1, p2: player2}
        if x >= 0:
            current_player = self.board.get_current_player()  # whose turn it is
            player_in_turn = players[current_player]
            # NOTE(review): maps (x, y) to the internal move index; the
            # constants assume a 9x9 board (80 = 9*9 - 1) -- confirm against
            # Board.width before changing the board size.
            move = 80 - (x + 1) * 9 + y + 1
            self.board.do_move(move)  # replay the opponent's move
        while True:
            current_player = self.board.get_current_player()  # whose turn it is
            player_in_turn = players[current_player]
            # self.board.set_current_availables() # select the availables list in use
            move = player_in_turn.get_action(self.board)
            self.board.do_move(move)  # place the stone
def softmax(x):
    """Numerically stable softmax: exponentiate shifted scores, normalize."""
    exps = np.exp(x - np.max(x))  # subtract the max to avoid overflow
    return exps / np.sum(exps)
class TreeNode(object):
    """A node in the MCTS tree.
    Each node keeps track of its own value Q, prior probability P, and
    its visit-count-adjusted prior score u.
    """
    def __init__(self, parent, prior_p):
        # parent: the TreeNode above this one (None for the root)
        # prior_p: prior probability P(s, a) supplied by the policy
        self._parent = parent
        self._children = {}  # a map from action to TreeNode
        self._n_visits = 0   # visit count N(s, a)
        self._Q = 0          # running mean of leaf values
        self._u = 0          # exploration bonus, recomputed in get_value
        self._P = prior_p
    def expand(self, action_priors):
        """Expand tree by creating new children.
        action_priors: a list of tuples of actions and their prior probability
        according to the policy function.
        """
        # Existing children are kept; only unseen actions are added.
        for action, prob in action_priors:
            if action not in self._children:
                self._children[action] = TreeNode(self, prob)
    def select(self, c_puct):
        """Select action among children that gives maximum action value Q
        plus bonus u(P).
        Return: A tuple of (action, next_node)
        """
        return max(self._children.items(),
                   key=lambda act_node: act_node[1].get_value(c_puct))
    def update(self, leaf_value):
        """Update node values from leaf evaluation.
        leaf_value: the value of subtree evaluation from the current player's
        perspective.
        """
        # Count visit.
        self._n_visits += 1
        # Update Q, a running average of values for all visits.
        self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits
    def update_recursive(self, leaf_value):
        """Like a call to update(), but applied recursively for all ancestors.
        """
        # If it is not root, this node's parent should be updated first.
        # The sign flips at each level: parent and child alternate players.
        if self._parent:
            self._parent.update_recursive(-leaf_value)
        self.update(leaf_value)
def get_value(self, c_puct):
"""Calculate and return the value for this node.
It is a combination of leaf evaluations Q, and this node's prior
adjusted for its visit count, u.
c_puct: a number in (0, inf) controlling the relative impact of
value Q, and prior probability P, on this node's score.
"""
self._u = (c_puct * self._P *
np.sqrt(self._parent._n_visits) / (1 + self._n_visits))
return | |
<reponame>linleon1995/prior_guiding_network
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from core import resnet_v1_beta, preprocess_utils, cell, attentions
slim = contrib_slim
resnet_v1_beta_block = resnet_v1_beta.resnet_v1_beta_block
def guidance_fusion_method(logits, guid_fuse, num_class, out_node, level):
    """Fuse per-class logits into a guidance signal for the decoder.

    Args:
      logits: NHWC tensor of per-class responses.
      guid_fuse: fusion strategy name ("sum", "mean", "entropy", "conv",
        "sum_dilated", "w_sum", "conv_sum", "w_sum_conv", "sum_wo_back",
        "mean_wo_back" or "same").
      num_class: number of classes along the channel axis of ``logits``.
      out_node: output channel count for the "conv" strategy.
      level: decoder level index (0-based); selects per-level kernel sizes.

    Returns:
      The guidance tensor (also appended to the "guidance" collection).

    Raises:
      ValueError: if ``guid_fuse`` is not a known strategy.
    """
    # BUGFIX: this free function previously read ``self.guid_fuse`` and
    # ``self.low_level`` although it has no ``self``; every call raised a
    # NameError. It now uses the ``guid_fuse`` parameter callers already pass.
    if guid_fuse == "sum":
        guid = tf.reduce_sum(logits, axis=3, keepdims=True)
    elif guid_fuse == "mean":
        guid = tf.reduce_mean(logits, axis=3, keepdims=True)
    elif guid_fuse == "entropy":
        # per-pixel entropy over the class axis; clipping avoids log(0)
        guid = tf.clip_by_value(logits, 1e-10, 1.0)
        guid = -tf.reduce_sum(guid * tf.log(guid), axis=3, keepdims=True)
    elif guid_fuse == "conv":
        # The hard-coded per-level tables below have 5 entries, so the
        # decoder is assumed to have 5 levels; only the last falls back to
        # a plain sum. TODO(review): confirm against len(Refine.low_level).
        if level < 4:
            guid = slim.conv2d(logits, out_node, kernel_size=[3, 3], activation_fn=None)
        else:
            guid = tf.reduce_sum(logits, axis=3, keepdims=True)
    elif guid_fuse == "sum_dilated":
        size = [8, 6, 4, 2, 1]
        kernel = tf.ones((size[level], size[level], num_class))
        guid = tf.nn.dilation2d(logits, filter=kernel, strides=(1, 1, 1, 1),
                                rates=(1, 1, 1, 1), padding="SAME")
        guid = guid - tf.ones_like(guid)
        guid = tf.reduce_sum(guid, axis=3, keepdims=True)
    elif guid_fuse == "w_sum":
        w = tf.nn.softmax(tf.reduce_sum(logits, axis=[1, 2], keepdims=True), axis=3)
        rev_w = tf.ones_like(w) - w  # rare classes receive larger weights
        guid = tf.reduce_sum(tf.multiply(logits, rev_w), axis=3, keepdims=True)
    elif guid_fuse == "conv_sum":
        k_size_list = [1, 1, 1, 3, 5]
        k_size = 2 * k_size_list[level] + 1
        # fixed all-ones kernel == box filter; normalized below
        guid = slim.conv2d(logits, 1, kernel_size=[k_size, k_size], activation_fn=None,
                           weights_initializer=tf.ones_initializer(), trainable=False, normalizer_fn=None)
        guid = guid / (k_size * k_size * num_class * 1)
    elif guid_fuse == "w_sum_conv":
        # TODO: make it right
        k_size_list = [1, 1, 1, 2, 4]
        k_size = 3 * k_size_list[level] + 1
        w = tf.reduce_sum(logits, axis=[1, 2], keepdims=True)
        rev_w = (tf.ones_like(w) + 1e-5) / (tf.sqrt(w) + 1e-5)
        rev_w = tf.tile(rev_w, [1, k_size, k_size, 1])
        rev_w = tf.expand_dims(rev_w, axis=4)
        n, h, w, channels_img = preprocess_utils.resolve_shape(logits, rank=4)
        n, fh, fw, channels, out_channels = preprocess_utils.resolve_shape(rev_w, rank=5)
        # F has shape (n, k_size, k_size, channels, out_channels)
        rev_w = tf.transpose(rev_w, [1, 2, 0, 3, 4])
        rev_w = tf.reshape(rev_w, [fh, fw, channels * n, out_channels])
        guid = tf.transpose(logits, [1, 2, 0, 3])  # shape (H, W, MB, channels_img)
        guid = tf.reshape(guid, [1, h, w, n * channels_img])
        out = tf.nn.depthwise_conv2d(
            guid,
            filter=rev_w,
            strides=[1, 1, 1, 1],
            padding="SAME")  # here no requirement about padding being 'VALID', use whatever you want.
        # Now out shape is (1, H-fh+1, W-fw+1, MB*channels*out_channels), because we used "VALID"
        out = tf.reshape(out, [h, w, n, channels, out_channels])
        out = tf.transpose(out, [2, 0, 1, 3, 4])
        out = tf.reduce_sum(out, axis=3)
        guid = out
    elif guid_fuse == "sum_wo_back":
        # zero out channel 0 (background) before summing
        flag = tf.concat([tf.zeros([1, 1, 1, 1]), tf.ones([1, 1, 1, num_class - 1])], axis=3)
        guid = tf.multiply(logits, flag)
        guid = tf.reduce_sum(guid, axis=3, keepdims=True)
    elif guid_fuse == "mean_wo_back":
        flag = tf.concat([tf.zeros([1, 1, 1, 1]), tf.ones([1, 1, 1, num_class - 1])], axis=3)
        guid = tf.multiply(logits, flag)
        guid = tf.reduce_mean(guid, axis=3, keepdims=True)
    elif guid_fuse == "same":
        # BUGFIX: was ``pass``, which left ``guid`` unbound and raised a
        # NameError at the add_to_collection call below.
        guid = logits
    else:
        raise ValueError("Unknown guid fuse")
    tf.add_to_collection("guidance", guid)
    return guid
class Refine(object):
    """Simple coarse-to-fine decoder over a list of low-level feature maps.

    NOTE(review): a second, more featureful ``Refine`` class defined later in
    this module shadows this one at import time; kept for parity.
    """

    def __init__(self, low_level, fusions, prior_seg=None, prior_pred=None, stage_pred_loss_name=None, guid_conv_nums=2,
                 guid_conv_type="conv2d", embed_node=32, predict_without_background=False,
                 num_class=14, weight_decay=0.0, scope=None, is_training=None, **kwargs):
        # BUGFIX: a dict view cannot be indexed, but simple_decoder() does
        # ``self.low_level[i]``; materialize a list (the later Refine class
        # already does this).
        self.low_level = list(low_level.values())
        self.fusions = fusions
        self.prior_seg = prior_seg
        self.prior_pred = prior_pred
        self.stage_pred_loss_name = stage_pred_loss_name
        self.embed_node = embed_node
        self.guid_conv_type = guid_conv_type
        self.guid_conv_nums = guid_conv_nums
        self.predict_without_background = predict_without_background
        self.num_class = num_class
        self.weight_decay = weight_decay
        self.scope = scope
        # assert len(self.low_level) == len(self.fusions)
        self.num_stage = len(self.low_level)
        self.is_training = is_training
        self.fine_tune_batch_norm = True
        self.apply_sram2 = kwargs.pop("apply_sram2", False)
        self.guid_fuse = kwargs.pop("guid_fuse", "sum")
        self.model_variants = kwargs.pop("model_variants", None)
        self.height = kwargs.pop("height", None)
        self.width = kwargs.pop("width", None)
        self.g = guidance_fusion_method  # module-level guidance fuser

    def get_fusion_method(self, method):
        """Map a fusion-strategy name to its implementation function.

        NOTE(review): silently returns None for an unknown name, so a typo
        surfaces later as a TypeError -- consider raising ValueError here.
        """
        if method == "concat":
            return concat_convolution
        elif method == "sum":
            return sum_convolution
        elif method in ("guid", "guid_uni"):
            return guid_attention
        elif method == "guid_class":
            return guid_class_attention
        elif method == "context_att":
            return context_attention
        elif method == "self_att":
            return self_attention

    def simple_decoder(self):
        """U-Net/FPN-style decoder: embed each level, fuse top-down, return
        the final fused feature map."""
        # TODO: without consider feature size
        for i in range(len(self.low_level)):
            net = slim.conv2d(self.low_level[i], self.embed_node, kernel_size=[1, 1], scope="embed%d" % (self.num_stage - i))
            if i > 0:
                fuse_func = self.get_fusion_method(self.fusions[i])
                net = fuse_func(net, fusion)
            fusion = slim.conv2d(net, self.embed_node, kernel_size=[3, 3], scope="transform%d" % (self.num_stage - i))
        return fusion

    def model(self):
        """Build the decoder graph and return per-class logits.

        NOTE(review): the "refine" variant calls ``self.refine_decoder()``,
        which is not defined on this class (only a commented draft existed);
        only "unet" and "fpn" variants work here.
        """
        batch_norm = slim.batch_norm
        batch_norm_params = get_batch_norm_params(decay=0.9997,
                                                  epsilon=1e-5,
                                                  scale=True,
                                                  is_training=(self.is_training and self.fine_tune_batch_norm),
                                                  # sync_batch_norm_method=model_options.sync_batch_norm_method
                                                  )
        with tf.variable_scope(self.scope, 'Decoder'):
            with slim.arg_scope([slim.conv2d],
                                trainable=True,
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.initializers.he_normal(),
                                weights_regularizer=slim.l2_regularizer(self.weight_decay),
                                kernel_size=[3, 3],
                                padding='SAME',
                                normalizer_fn=slim.batch_norm):
                with slim.arg_scope([batch_norm], **batch_norm_params):
                    with tf.variable_scope(self.model_variants, "decoder"):
                        if self.model_variants == "unet" or self.model_variants == "fpn":
                            feature = self.simple_decoder()
                        elif self.model_variants == "refine":
                            feature = self.refine_decoder()
                        else:
                            raise ValueError("Unknown decoder type")
                        # upsample to the requested output resolution, then
                        # project to per-class logits
                        y = resize_bilinear(feature, [self.height, self.width])
                        y = slim.conv2d(y, self.embed_node, scope="decoder_output")
                        y = slim.conv2d(
                            y, self.num_class, kernel_size=[1, 1], stride=1, activation_fn=None, scope='logits_pred_class%d' % self.num_class)
                        return y
def slim_sram(in_node,
              guidance,
              num_conv=1,
              conv_type="conv",
              conv_node=64,
              scope=None):
    """Single Residual Attention Module.

    Runs ``num_conv`` 3x3 (separable) convolutions over ``in_node``, gates
    the result with ``guidance`` (broadcast over channels when it is
    single-channel) and adds it back residually:
    ``out = in_node + conv(in_node) * guidance``.

    Args:
      in_node: NHWC input feature map.
      guidance: attention map with 1 or ``conv_node`` channels.
      num_conv: total convolution count; the last one has no activation.
      conv_type: "conv" or "separable_conv".
      conv_node: channel count of the internal convolutions.
      scope: variable-scope name; defaults to "sram" when None.

    Raises:
      ValueError: on unknown ``conv_type`` or guidance channel count.
    """
    with tf.variable_scope(scope, "sram", reuse=tf.AUTO_REUSE):
        net = in_node
        if conv_type == "conv":
            conv_op = slim.conv2d
        elif conv_type == "separable_conv":
            conv_op = slim.separable_conv2d
        else:
            raise ValueError("Unknown convolution type")
        for i in range(num_conv - 1):
            net = conv_op(net, conv_node, kernel_size=[3, 3], scope=conv_type + str(i + 1))
        net = conv_op(net, conv_node, kernel_size=[3, 3], scope=conv_type + "out", activation_fn=None)
        guidance_filters = preprocess_utils.resolve_shape(guidance, rank=4)[3]
        if guidance_filters == 1:
            # broadcast a single-channel map across all feature channels
            guidance_tile = tf.tile(guidance, [1, 1, 1, conv_node])
        elif guidance_filters == conv_node:
            guidance_tile = guidance
        else:
            raise ValueError("Unknown guidance filters number")
        guided = tf.multiply(net, guidance_tile)
        output = in_node + guided
        # BUGFIX: ``scope`` may be None (variable_scope then defaults to
        # "sram"); concatenating None with a str raised TypeError.
        tf.add_to_collection((scope or "sram") + "_guided_feature", guided)
        return output
class Refine(object):
def __init__(self, low_level, fusions, prior_seg=None, prior_pred=None, stage_pred_loss_name=None, guid_conv_nums=2,
guid_conv_type="conv2d", embed_node=32, predict_without_background=False,
num_class=14, weight_decay=0.0, scope=None, is_training=None, **kwargs):
self.low_level = list(low_level.values())
self.fusions = fusions
self.prior_seg = prior_seg
self.prior_pred = prior_pred
self.stage_pred_loss_name = stage_pred_loss_name
self.embed_node = embed_node
self.guid_conv_type = guid_conv_type
self.guid_conv_nums = guid_conv_nums
self.predict_without_background = predict_without_background
self.num_class = num_class
self.weight_decay = weight_decay
self.scope = scope
# assert len(self.low_level) == len(self.fusions)
self.num_stage = len(self.low_level)
self.is_training = is_training
self.fine_tune_batch_norm = True
self.apply_sram2 = kwargs.pop("apply_sram2", False)
self.guid_fuse = kwargs.pop("guid_fuse", "sum")
def embed(self, x, embed_method, out_node, scope):
if embed_method == "self_att":
sa_layer = attentions.self_attention(out_node)
net = sa_layer(x, x, x, scope)
else:
net = slim.conv2d(x, out_node, kernel_size=[1,1], scope=scope)
return net
def model(self):
# TODO: reolve_shape
# TODO: image size
# TODO: Remove after finish code
batch_norm = slim.batch_norm
batch_norm_params = get_batch_norm_params(decay=0.9997,
epsilon=1e-5,
scale=True,
is_training=(self.is_training and self.fine_tune_batch_norm),
# sync_batch_norm_method=model_options.sync_batch_norm_method
)
with tf.variable_scope(self.scope, 'Refine_Network'):
with slim.arg_scope([slim.conv2d],
trainable=True,
activation_fn=tf.nn.relu,
weights_initializer=tf.initializers.he_normal(),
weights_regularizer=slim.l2_regularizer(self.weight_decay),
kernel_size=[3, 3],
padding='SAME',
normalizer_fn=slim.batch_norm):
with slim.arg_scope([batch_norm], **batch_norm_params):
y_tm1 = self.prior_seg
# TODO: Would default vars value causes error?
if self.prior_seg is not None or self.prior_pred is not None:
if "guid" in self.fusions:
guid = self.prior_seg
elif "guid_class" in self.fusions:
guid = self.prior_pred
elif "guid_uni" in self.fusions or "context_att" in self.fusions or "self_att" in self.fusions:
guid = tf.reduce_mean(self.prior_pred, axis=3, keepdims=True)
out_node = self.embed_node
tf.add_to_collection("guidance", guid)
for i, v in enumerate(self.low_level):
module_order = self.num_stage-i
fuse_method = self.fusions[i]
embed = self.embed(v, fuse_method, out_node, scope="embed%d" %module_order)
tf.add_to_collection("embed", embed)
fuse_func = self.get_fusion_method(fuse_method)
h, w = preprocess_utils.resolve_shape(embed, rank=4)[1:3]
if y_tm1 is not None:
y_tm1 = resize_bilinear(y_tm1, [h, w])
tf.add_to_collection("feature", y_tm1)
else:
# TODO: remove
tf.add_to_collection("feature", tf.zeros_like(embed))
if fuse_method in ("concat", "sum"):
if y_tm1 is not None:
y = fuse_func(embed, y_tm1, out_node, fuse_method+str(module_order))
else:
y = tf.identity(embed, name="identity%d" %module_order)
elif fuse_method in ("guid", "guid_class", "guid_uni", "context_att", "self_att"):
# guid = resize_bilinear(guid, [h, w])
if guid is not None:
guid = resize_bilinear(guid, [h, w])
# tf.add_to_collection("guid", guid)
fuse = fuse_func(embed, y_tm1, guid, out_node, fuse_method+str(module_order),
num_classes=self.num_class, apply_sram2=self.apply_sram2)
"""
fuse = tf.reshape(fuse, [4, 3, h, w, out_node])
_, fuse = seq_model(fuse, h, w, out_node, self.weight_decay, self.is_training,
scope="gru"+str(i), cell_type='ConvGRU', output_wo_fuse=True)
fuse = tf.reshape(fuse, [-1, h, w, out_node])
"""
y = slim.conv2d(fuse, self.embed_node, scope='fuse'+str(i))
tf.add_to_collection("refining", y)
if self.stage_pred_loss_name is not None:
num_class = self.num_class
if self.predict_without_background:
num_class -= 1
stage_pred = slim.conv2d(fuse, num_class, kernel_size=[1,1], activation_fn=None,
scope="stage_pred%d_pred_class%d" %(module_order,num_class))
# preds["guidance%d" %module_order] = stage_pred
tf.add_to_collection("stage_pred", stage_pred)
if fuse_method in ("guid"):
guid = y
y_tm1 = None
elif fuse_method in ("guid_class", "guid_uni", "context_att", "self_att"):
if i < len(self.low_level)-1:
if "softmax" in | |
lambda R, i: extrapolator_method(
R[i, :, :], V, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not dask_imported:
R[i, :, :] = f(R, i)
else:
res.append(dask.delayed(f)(R, i))
if dask_imported:
num_workers_ = len(res) if num_workers > len(res) else num_workers
R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])
if mask_method == "incremental":
# get mask parameters
mask_rim = mask_kwargs.get("mask_rim", 10)
mask_f = mask_kwargs.get("mask_f", 1.0)
# initialize the structuring element
struct = scipy.ndimage.generate_binary_structure(2, 1)
# iterate it to expand it nxn
n = mask_f * timestep / kmperpixel
struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
noise_kwargs.update(
{
"win_size": win_size,
"overlap": overlap,
"war_thr": war_thr,
"rm_rdisc": True,
"donorm": True,
}
)
print("Estimating nowcast parameters...", end="")
    def estimator(R, parsglob=None, idxm=None, idxn=None):
        """Estimate the nowcast model parameters for ``R``.

        With ``parsglob=None`` a global fit over the full field is done;
        otherwise the precomputed global cascade in ``parsglob`` is sliced to
        the local window selected by the ``idxm``/``idxn`` row/column index
        pairs. Returns a dict with keys "P" (noise generator), "filter",
        "mu", "sigma", "PHI" (AR coefficients), "R_c" (per-member cascades)
        and "MASK_prec".

        NOTE(review): relies on many names closed over from the enclosing
        nowcast function (noise_method, decomp_method, ar_order,
        n_cascade_levels, n_ens_members, mask_method, R_thr, struct, ...).
        """
        pars = {}
        # initialize the perturbation generator for the precipitation field
        if noise_method is not None and parsglob is None:
            P = init_noise(R, fft_method=fft_method, **noise_kwargs)
        else:
            P = None
        pars["P"] = P
        # initialize the band-pass filter
        if parsglob is None:
            filter = filter_method(R.shape[1:], n_cascade_levels, **filter_kwargs)
            pars["filter"] = filter
        else:
            pars["filter"] = None
        # compute the cascade decompositions of the input precipitation fields
        if parsglob is None:
            R_d = []
            for i in range(ar_order + 1):
                R_d_ = decomp_method(R[i, :, :], filter, fft_method=fft_method,
                                     normalize=True, compute_stats=True)
                R_d.append(R_d_)
            R_d_ = None
        # normalize the cascades and rearrange them into a four-dimensional array
        # of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
        if parsglob is None:
            R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
            mu = R_d[-1]["means"]
            sigma = R_d[-1]["stds"]
            R_d = None
        else:
            # slice the global cascade to the window and re-normalize with
            # the window's own statistics
            R_c = parsglob["R_c"][0][
                :, :, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
            ].copy()
            mu = np.mean(R_c, axis=(2, 3))
            sigma = np.std(R_c, axis=(2, 3))
            R_c = (R_c - mu[:, :, None, None]) / sigma[:, :, None, None]
            mu = mu[:, -1]
            sigma = sigma[:, -1]
        pars["mu"] = mu
        pars["sigma"] = sigma
        # compute lag-l temporal autocorrelation coefficients for each cascade level
        GAMMA = np.empty((n_cascade_levels, ar_order))
        for i in range(n_cascade_levels):
            R_c_ = np.stack([R_c[i, j, :, :] for j in range(ar_order + 1)])
            GAMMA[i, :] = correlation.temporal_autocorrelation(R_c_)
        R_c_ = None
        if ar_order == 2:
            # adjust the local lag-2 correlation coefficient to ensure that the AR(p)
            # process is stationary
            for i in range(n_cascade_levels):
                GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
                    GAMMA[i, 0], GAMMA[i, 1]
                )
        # estimate the parameters of the AR(p) model from the autocorrelation
        # coefficients
        PHI = np.empty((n_cascade_levels, ar_order + 1))
        for i in range(n_cascade_levels):
            PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
        pars["PHI"] = PHI
        # stack the cascades into a five-dimensional array containing all ensemble
        # members
        R_c = [R_c.copy() for i in range(n_ens_members)]
        pars["R_c"] = R_c
        if mask_method is not None and parsglob is None:
            MASK_prec = R[-1, :, :] >= R_thr
            if mask_method == "incremental":
                # initialize precip mask for each member
                MASK_prec = _compute_incremental_mask(MASK_prec, struct, mask_rim)
            MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
        else:
            MASK_prec = None
        pars["MASK_prec"] = MASK_prec
        return pars
# prepare windows
M, N = R.shape[1:]
n_windows_M = np.ceil(1.0 * M / win_size[0]).astype(int)
n_windows_N = np.ceil(1.0 * N / win_size[1]).astype(int)
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
sys.stdout.flush()
if measure_time:
starttime = time.time()
# compute global parameters to be used as defaults
parsglob = estimator(R)
# loop windows
if n_windows_M > 1 or n_windows_N > 1:
war = np.empty((n_windows_M, n_windows_N))
PHI = np.empty((n_windows_M, n_windows_N, n_cascade_levels, ar_order + 1))
mu = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
sigma = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
ff = []
rc = []
pp = []
mm = []
for m in range(n_windows_M):
ff_ = []
pp_ = []
rc_ = []
mm_ = []
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(np.max((m * win_size[0] - overlap * win_size[0], 0)))
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(np.max((n * win_size[1] - overlap * win_size[1], 0)))
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
mask = np.zeros((M, N), dtype=bool)
mask[idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)] = True
R_ = R[:, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)]
war[m, n] = np.sum(R_[-1, :, :] >= R_thr) / R_[-1, :, :].size
if war[m, n] > war_thr:
# estimate local parameters
pars = estimator(R, parsglob, idxm, idxn)
ff_.append(pars["filter"])
pp_.append(pars["P"])
rc_.append(pars["R_c"])
mm_.append(pars["MASK_prec"])
mu[m, n, :] = pars["mu"]
sigma[m, n, :] = pars["sigma"]
PHI[m, n, :, :] = pars["PHI"]
else:
# dry window
ff_.append(None)
pp_.append(None)
rc_.append(None)
mm_.append(None)
ff.append(ff_)
pp.append(pp_)
rc.append(rc_)
mm.append(mm_)
# remove unnecessary variables
ff_ = None
pp_ = None
rc_ = None
mm_ = None
pars = None
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print(" done.")
# initialize the random generators
if noise_method is not None:
randgen_prec = []
randgen_motion = []
np.random.seed(seed)
for j in range(n_ens_members):
rs = np.random.RandomState(seed)
randgen_prec.append(rs)
seed = rs.randint(0, high=1e9)
rs = np.random.RandomState(seed)
randgen_motion.append(rs)
seed = rs.randint(0, high=1e9)
if vel_pert_method is not None:
init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
# initialize the perturbation generators for the motion field
vps = []
for j in range(n_ens_members):
kwargs = {
"randstate": randgen_motion[j],
"p_par": vp_par,
"p_perp": vp_perp,
}
vp_ = init_vel_noise(V, 1.0 / kmperpixel, timestep, **kwargs)
vps.append(vp_)
D = [None for j in range(n_ens_members)]
R_f = [[] for j in range(n_ens_members)]
if measure_time:
init_time = time.time() - starttime_init
R = R[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
# iterate each time step
for t in range(n_timesteps):
print("Computing nowcast for time step %d... " % (t + 1), end="")
sys.stdout.flush()
if measure_time:
starttime = time.time()
# iterate each ensemble member
def worker(j):
# first the global step
if noise_method is not None:
# generate noise field
EPS = generate_noise(
parsglob["P"], randstate=randgen_prec[j], fft_method=fft_method
)
# decompose the noise field into a cascade
EPS_d = decomp_method(EPS, parsglob["filter"], fft_method=fft_method,
normalize=True, compute_stats=True)
else:
EPS_d = None
# iterate the AR(p) model for each cascade level
R_c = parsglob["R_c"][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :].copy()
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d is not None:
EPS_ = (
EPS_d["cascade_levels"][i, :, :] - EPS_d["means"][i]
) / EPS_d["stds"][i]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], parsglob["PHI"][i, :], eps=EPS_
)
EPS_ = None
parsglob["R_c"][j] = R_c.copy()
EPS = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
R_c_ = _recompose_cascade(R_c, parsglob["mu"], parsglob["sigma"])
R_c = None
# then the local steps
if n_windows_M > 1 or n_windows_N > 1:
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
R_l = np.zeros((M, N), dtype=float)
M_s = np.zeros((M, N), dtype=float)
for m in range(n_windows_M):
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(
np.max((m * win_size[0] - overlap * win_size[0], 0))
)
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(
np.max((n * win_size[1] - overlap * win_size[1], 0))
)
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
# build localization mask
mask = _get_mask((M, N), idxm, idxn)
mask_l = mask[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
M_s += mask
# skip if dry
if war[m, n] > war_thr:
R_c = rc[m][n][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :]
if noise_method is not None:
# extract noise field
EPS_d_l = EPS_d["cascade_levels"][
:,
idxm.item(0) : idxm.item(1),
idxn.item(0) : idxn.item(1),
].copy()
mu_ = np.mean(EPS_d_l, axis=(1, 2))
sigma_ = np.std(EPS_d_l, axis=(1, 2))
else:
EPS_d_l = None
# iterate the AR(p) model for each cascade level
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d_l is not None:
EPS_ = (
EPS_d_l[i, :, :] - mu_[i, None, None]
) / sigma_[i, None, None]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], PHI[m, n, i, :], eps=EPS_
)
EPS_ = None
rc[m][n][j] = R_c.copy()
| |
#
# © Copyright 2020 Hewlett Packard Enterprise Development LP
#
# This file was auto-generated by the Python SDK generator; DO NOT EDIT.
#
from ..resource import Resource, Collection
from ..exceptions import NimOSAPIOperationUnsupported
from .snapshots import SnapshotList
from .volume_collections import VolumeCollection
class Volume(Resource):
"""
Volumes are the basic storage units from which the total capacity is apportioned. The terms volume and LUN are used interchangeably.The number of volumes per array depends on storage allocation.
Parameters:
- id : Identifier for the volume.
- name : Name of the volume.
- full_name : Fully qualified name of volume.
- search_name : Name of volume used for object search.
- size : Volume size in mebibytes. Size is required for creating a volume but not for cloning an existing volume.
- description : Text description of volume.
- perfpolicy_name : Name of performance policy.
- perfpolicy_id : Identifier of the performance policy. After creating a volume, performance policy for the volume can only be changed to another performance policy with same block size.
- reserve : Amount of space to reserve for this volume as a percentage of volume size.
- warn_level : This attribute is deprecated. Alert threshold for the volume's mapped usage, expressed as a percentage of the volume's size. When the volume's mapped usage exceeds warn_level, the array issues an alert. If this option is not specified, array default volume warn level setting is used to decide the warning level for this volume.
- limit : Limit on the volume's mapped usage, expressed as a percentage of the volume's size. When the volume's mapped usage exceeds limit, the volume will be offlined or made non-writable. If this option is not specified, array default volume limit setting is used to decide the limit for this volume.
- snap_reserve : Amount of space to reserve for snapshots of this volume as a percentage of volume size.
- snap_warn_level : Threshold for available space as a percentage of volume size below which an alert is raised.
- snap_limit : This attribute is deprecated. The array does not limit a volume's snapshot space usage. The attribute is ignored on input and returns max int64 on output.
- snap_limit_percent : This attribute is deprecated. The array does not limit a volume's snapshot space usage. The attribute is ignored on input and returns -1 on output.
- num_snaps : Number of live, non-hidden snapshots for this volume.
- projected_num_snaps : Deprecated. Projected number of snapshots (including scheduled and manual) for this volume.
- online : Online state of volume, available for host initiators to establish connections.
- owned_by_group : Name of group that currently owns the volume.
- owned_by_group_id : ID of group that currently owns the volume.
- multi_initiator : For iSCSI Volume Target, this flag indicates whether the volume and its snapshots can be accessed from multiple initiators at the same time. The default is false. For iSCSI Group Target or FC access protocol, the attribute cannot be modified and always reads as false.
- iscsi_target_scope : This indicates whether volume is exported under iSCSI Group Target or iSCSI Volume Target. This attribute is only meaningful to iSCSI system. On FC system, all volumes are exported under the FC Group Target. In create operation, the volume's target type will be set by this attribute. If not specified, it will be set as the group-setting. In clone operation, the clone's target type will inherit from the parent' setting.
- pool_name : Name of the pool where the volume resides. Volume data will be distributed across arrays over which specified pool is defined. If pool option is not specified, volume is assigned to the default pool.
- pool_id : Identifier associated with the pool in the storage pool table.
- read_only : Volume is read-only.
- serial_number : Identifier associated with the volume for the SCSI protocol.
- secondary_serial_number : Secondary identifier associated with the volume for the SCSI protocol.
- target_name : The iSCSI Qualified Name (IQN) or the Fibre Channel World Wide Node Name (WWNN) of the target volume.
- block_size : Size in bytes of blocks in the volume.
- offline_reason : Volume offline reason.
- clone : Whether this volume is a clone. Use this attribute in combination with name and base_snap_id to create a clone by setting clone = true.
- parent_vol_name : Name of parent volume.
- parent_vol_id : Parent volume ID.
- base_snap_name : Name of base snapshot.
- base_snap_id : Base snapshot ID. This attribute is required together with name and clone when cloning a volume with the create operation.
- replication_role : Replication role that this volume performs.
- volcoll_name : Name of volume collection of which this volume is a member.
- volcoll_id : ID of volume collection of which this volume is a member. Use this attribute in update operation to associate or dissociate volumes with or from volume collections. When associating, set this attribute to the ID of the volume collection. When dissociating, set this attribute to empty string.
- agent_type : External management agent type.
- force : Forcibly offline, reduce size or change read-only status a volume.
- creation_time : Time when this volume was created.
- last_modified : Time when this volume was last modified.
- protection_type : Specifies if volume is protected with schedules. If protected, indicate whether replication is setup.
- last_snap : Last snapshot for this volume.
- last_replicated_snap : Last replicated snapshot for this volume.
- dest_pool_name : Name of the destination pool where the volume is moving to.
- dest_pool_id : ID of the destination pool where the volume is moving to.
- move_start_time : The Start time when this volume was moved.
- move_aborting : This indicates whether the move of the volume is aborting or not.
- move_bytes_migrated : The bytes of volume which have been moved.
- move_bytes_remaining : The bytes of volume which have not been moved.
- move_est_compl_time : The estimated time of completion of a move.
- usage_valid : This indicates whether usage information of volume and snapshots are valid or not.
- space_usage_level : Indicates space usage level based on warning level.
- total_usage_bytes : Sum of volume mapped usage and uncompressed backup data(including pending deletes) in bytes of this volume.
- vol_usage_compressed_bytes : Compressed data in bytes for this volume.
- vol_usage_uncompressed_bytes : Uncompressed data in bytes for this volume.
- vol_usage_mapped_bytes : Mapped data in bytes for this volume.
- snap_usage_compressed_bytes : Sum of compressed backup data in bytes stored in snapshots of this volume.
- snap_usage_uncompressed_bytes : Sum of uncompressed unique backup data in bytes stored in snapshots of this volume.
- snap_usage_populated_bytes : Sum of backup data in bytes stored in snapshots of this volume without accounting for the sharing of data between snapshots.
- cache_pinned : If set to true, all the contents of this volume are kept in flash cache. This provides for consistent performance guarantees for all types of workloads. The amount of flash needed to pin the volume is equal to the limit for the volume.
- pinned_cache_size : The amount of flash pinned on the volume.
- cache_needed_for_pin : The amount of flash needed to pin the volume.
- upstream_cache_pinned : This indicates whether the upstream volume is cache pinned or not.
- cache_policy : Cache policy applied to the volume.
- thinly_provisioned : Set volume's provisioning level to thin. Also advertises volume as thinly provisioned to initiators supporting thin provisioning. For such volumes, soft limit notification is set to initiators when the volume space usage crosses its volume_warn_level. Default is yes. The volume's space is provisioned immediately, but for advertising status, this change takes effect only for new connections to the volume. Initiators must disconnect and | |
is never awaited" message
# Fix: was ``__slots___`` (three trailing underscores) — a typo that merely
# created an ordinary class attribute named "__slots___" and left instances
# with a __dict__, instead of activating the slots machinery for the single
# ``_method`` attribute assigned in __init__.
__slots__ = ('_method',)
def __init__(self, method):
    # ``method`` is a zero-argument callable returning an awaitable; it is
    # invoked lazily in __await__ only if the caller actually awaits.
    self._method = method
def __await__(self):
    """Invoke the wrapped method only now, and delegate to its awaitable."""
    awaitable = self._method()
    return awaitable.__await__()
class Stream:
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
get_extra_info() and close(). It adds drain() which returns an
optional Future on which you can wait for flow control. It also
adds a transport property which references the Transport
directly.
"""
_source_traceback = None
def __init__(self, mode, *,
             transport=None,
             protocol=None,
             loop=None,
             limit=_DEFAULT_LIMIT,
             is_server_side=False,
             _asyncio_internal=False):
    """Initialize the stream wrapper.

    ``mode`` selects read/write capability; ``limit`` is the line-length
    limit and half the flow-control buffer limit.  ``_asyncio_internal``
    guards against direct instantiation from user code.
    """
    if not _asyncio_internal:
        # Fix: "instaniated" -> "instantiated" in the user-facing warning.
        warnings.warn(f"{self.__class__} should be instantiated "
                      "by asyncio internals only, "
                      "please avoid its creation from user code",
                      DeprecationWarning)
    self._mode = mode
    self._transport = transport
    self._protocol = protocol
    self._is_server_side = is_server_side
    # The line length limit is a security feature;
    # it also doubles as half the buffer limit.
    if limit <= 0:
        raise ValueError('Limit cannot be <= 0')
    self._limit = limit
    if loop is None:
        self._loop = events.get_event_loop()
    else:
        self._loop = loop
    self._buffer = bytearray()
    self._eof = False    # Whether we're done.
    self._waiter = None  # A future used by _wait_for_data()
    self._exception = None
    self._paused = False
    # Pre-completed future reused by _fast_drain()'s fast path.
    self._complete_fut = self._loop.create_future()
    self._complete_fut.set_result(None)
    if self._loop.get_debug():
        self._source_traceback = format_helpers.extract_stack(
            sys._getframe(1))
def __repr__(self):
    """Debug representation listing the stream's notable state flags."""
    entries = [
        (True, self.__class__.__name__),
        (True, f'mode={self._mode}'),
        (self._buffer, f'{len(self._buffer)} bytes'),
        (self._eof, 'eof'),
        (self._limit != _DEFAULT_LIMIT, f'limit={self._limit}'),
        (self._waiter, f'waiter={self._waiter!r}'),
        (self._exception, f'exception={self._exception!r}'),
        (self._transport, f'transport={self._transport!r}'),
        (self._paused, 'paused'),
    ]
    return '<{}>'.format(' '.join(text for cond, text in entries if cond))
@property
def mode(self):
    """The mode object supplied at construction (read/write capability)."""
    return self._mode
def is_server_side(self):
    """Return True when this stream was created on the server side."""
    return self._is_server_side
@property
def transport(self):
    """The wrapped transport (None until set via __init__/set_transport)."""
    return self._transport
def write(self, data):
    """Write *data* to the transport; returns an awaitable for flow control."""
    _ensure_can_write(self._mode)
    self._transport.write(data)
    return self._fast_drain()
def writelines(self, data):
    """Write an iterable of chunks; returns an awaitable for flow control."""
    _ensure_can_write(self._mode)
    self._transport.writelines(data)
    return self._fast_drain()
def _fast_drain(self):
    """Return an awaitable used for write flow control.

    Fast path: when the transport is healthy and not paused, the
    pre-completed future is handed back so callers pay nothing for the
    await; otherwise an _OptionalAwait around drain() is returned.
    """
    def erred(err):
        # Deliver the error through a future so callers can await it.
        fut = self._loop.create_future()
        fut.set_exception(err)
        return fut

    exc = self.exception()
    if exc is not None:
        return erred(exc)
    if self._transport.is_closing():
        # Closing transport: take the slow path through drain().
        return _OptionalAwait(self.drain)
    if self._protocol._connection_lost:
        return erred(ConnectionResetError('Connection lost'))
    if not self._protocol._paused:
        # The stream is not paused: no need to wait for a resume signal.
        return self._complete_fut
    return _OptionalAwait(self.drain)
def write_eof(self):
    """Signal EOF on the transport's write end."""
    _ensure_can_write(self._mode)
    return self._transport.write_eof()
def can_write_eof(self):
    """True when this stream is writable and its transport supports EOF."""
    if self._mode.is_write():
        return self._transport.can_write_eof()
    return False
def close(self):
    """Close the transport; the result may be awaited for full closure."""
    self._transport.close()
    return _OptionalAwait(self.wait_closed)
def is_closing(self):
    """True if the underlying transport is closing or closed."""
    return self._transport.is_closing()
async def abort(self):
    """Abort the transport immediately, then wait until it is closed."""
    self._transport.abort()
    await self.wait_closed()
async def wait_closed(self):
    """Wait for the protocol's close waiter to complete."""
    await self._protocol._get_close_waiter(self)
def get_extra_info(self, name, default=None):
    """Proxy straight to the transport's get_extra_info()."""
    return self._transport.get_extra_info(name, default)
async def drain(self):
    """Flush the write buffer.

    The intended use is to write

        w.write(data)
        await w.drain()
    """
    _ensure_can_write(self._mode)
    exc = self.exception()
    if exc is not None:
        raise exc
    if self._transport.is_closing():
        # Wait for protocol.connection_lost() call
        # Raise connection closing error if any,
        # ConnectionResetError otherwise
        await tasks.sleep(0)
    await self._protocol._drain_helper()
async def sendfile(self, file, offset=0, count=None, *, fallback=True):
    """Send *file* over the transport via the event loop's sendfile()."""
    await self.drain()  # check for stream mode and exceptions
    return await self._loop.sendfile(self._transport, file,
                                     offset, count, fallback=fallback)
async def start_tls(self, sslcontext, *,
                    server_hostname=None,
                    ssl_handshake_timeout=None):
    """Upgrade the existing transport to TLS in place."""
    await self.drain()  # check for stream mode and exceptions
    transport = await self._loop.start_tls(
        self._transport, self._protocol, sslcontext,
        server_side=self._is_server_side,
        server_hostname=server_hostname,
        ssl_handshake_timeout=ssl_handshake_timeout)
    # Re-point both the stream and the protocol at the new TLS transport.
    self._transport = transport
    self._protocol._transport = transport
    self._protocol._over_ssl = True
def exception(self):
    """Return the pending exception set by set_exception(), or None."""
    return self._exception
def set_exception(self, exc):
    """Record *exc* and fail the pending read waiter, if any."""
    self._exception = exc
    waiter, self._waiter = self._waiter, None
    if waiter is None:
        return
    if not waiter.cancelled():
        waiter.set_exception(exc)
def _wakeup_waiter(self):
    """Wakeup read*() functions waiting for data or EOF."""
    waiter, self._waiter = self._waiter, None
    if waiter is not None and not waiter.cancelled():
        waiter.set_result(None)
def set_transport(self, transport):
    """Attach *transport*; idempotent for the same transport object."""
    if transport is not self._transport:
        assert self._transport is None, 'Transport already set'
        self._transport = transport
def _maybe_resume_transport(self):
    # Resume reading once the buffer has drained back to at most the limit.
    if self._paused and len(self._buffer) <= self._limit:
        self._paused = False
        self._transport.resume_reading()
def feed_eof(self):
    """Mark the stream as ended and wake any pending reader."""
    self._eof = True
    self._wakeup_waiter()
def at_eof(self):
    """Return True if the buffer is empty and 'feed_eof' was called."""
    if self._buffer:
        return False
    return self._eof
def feed_data(self, data):
    """Append *data* to the internal buffer and wake any pending reader.

    Pauses the transport once the buffer grows past twice the limit.
    """
    _ensure_can_read(self._mode)
    assert not self._eof, 'feed_data after feed_eof'
    if not data:
        return
    self._buffer.extend(data)
    self._wakeup_waiter()
    if (self._transport is not None and
            not self._paused and
            len(self._buffer) > 2 * self._limit):
        try:
            self._transport.pause_reading()
        except NotImplementedError:
            # The transport can't be paused.
            # We'll just have to buffer all data.
            # Forget the transport so we don't keep trying.
            self._transport = None
        else:
            self._paused = True
async def _wait_for_data(self, func_name):
    """Wait until feed_data() or feed_eof() is called.

    If stream was paused, automatically resume it.
    """
    # StreamReader uses a future to link the protocol feed_data() method
    # to a read coroutine. Running two read coroutines at the same time
    # would have an unexpected behaviour. It would not possible to know
    # which coroutine would get the next data.
    if self._waiter is not None:
        raise RuntimeError(
            f'{func_name}() called while another coroutine is '
            f'already waiting for incoming data')
    assert not self._eof, '_wait_for_data after EOF'
    # Waiting for data while paused will make deadlock, so prevent it.
    # This is essential for readexactly(n) for case when n > self._limit.
    if self._paused:
        self._paused = False
        self._transport.resume_reading()
    self._waiter = self._loop.create_future()
    try:
        await self._waiter
    finally:
        # Always clear the waiter, even if the await was cancelled.
        self._waiter = None
async def readline(self):
    """Read chunk of data from the stream until newline (b'\n') is found.

    On success, return chunk that ends with newline. If only partial
    line can be read due to EOF, return incomplete line without
    terminating newline. When EOF was reached while no bytes read, empty
    bytes object is returned.

    If limit is reached, ValueError will be raised. In that case, if
    newline was found, complete line including newline will be removed
    from internal buffer. Else, internal buffer will be cleared. Limit is
    compared against part of the line without newline.

    If stream was paused, this function will automatically resume it if
    needed.
    """
    _ensure_can_read(self._mode)
    sep = b'\n'
    seplen = len(sep)
    try:
        line = await self.readuntil(sep)
    except exceptions.IncompleteReadError as e:
        # EOF before a full line: hand back whatever was read.
        return e.partial
    except exceptions.LimitOverrunError as e:
        # Over-limit: consume the offending data per the contract above.
        if self._buffer.startswith(sep, e.consumed):
            del self._buffer[:e.consumed + seplen]
        else:
            self._buffer.clear()
        self._maybe_resume_transport()
        raise ValueError(e.args[0])
    return line
async def readuntil(self, separator=b'\n'):
"""Read data from the stream until ``separator`` is found.
On success, the data and separator will be removed from the
internal buffer (consumed). Returned data will include the
separator at the end.
Configured stream limit is used to check result. Limit sets the
maximal length of data that can be returned, not counting the
separator.
If an EOF occurs and the complete separator is still not found,
an IncompleteReadError exception will be raised, and the internal
buffer will be reset. The IncompleteReadError.partial attribute
may contain the separator partially.
If the data cannot be read because of over limit, a
LimitOverrunError exception will be raised, and the data
will be left in the internal buffer, so it can be read again.
"""
_ensure_can_read(self._mode)
seplen = len(separator)
if seplen == 0:
raise ValueError('Separator should be at least one-byte string')
if self._exception is not None:
raise self._exception
# Consume whole buffer except last bytes, which length is
# one less than seplen. Let's check corner cases with
# separator='SEPARATOR':
# * we have received almost complete separator (without last
# byte). i.e buffer='some textSEPARATO'. In this case we
# can safely consume len(separator) - 1 bytes.
# * last byte of buffer is first byte of separator, i.e.
# buffer='abcdefghijklmnopqrS'. We may safely consume
# everything except that last byte, but this require to
# analyze bytes of buffer that match partial separator.
# This is slow and/or require FSM. For this case our
# implementation is not optimal, since require rescanning
# of data that is known to not belong to separator. In
# real world, separator will not be so long to notice
# performance problems. Even when reading MIME-encoded
# messages :)
# `offset` is the number of bytes from the beginning of the buffer
# where there is no occurrence of `separator`.
offset = 0
# Loop until we find `separator` in the buffer, exceed the buffer size,
# or an EOF has happened.
while True:
buflen = len(self._buffer)
# Check if we now have enough data in the buffer for `separator` to
# fit.
if buflen - offset >= seplen:
isep = self._buffer.find(separator, offset)
if isep != -1:
# `separator` is in the buffer. `isep` | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2018 Shenzhen Auto-link world Information Technology Co., Ltd.
All Rights Reserved
Name: MqttDump.py
Purpose:
Created By: <NAME> <<EMAIL>>
Created Date: 2018-01-11
Changelog:
Date Desc
2018-01-11 Created by Clive Lau
"""
# Builtin libraries
from datetime import datetime
# Third-party libraries
from robot.api import logger
# Customized libraries
# Alias Robot Framework's console writer as a module-level callable.
# NOTE(review): the name shadows the stdlib ``logging`` module — confirm
# nothing in this module expects the real ``logging`` package.
logging = logger.console
class MqttDump(object):
# NOTE(review): this class attribute is never read — method bodies resolve
# the module-level ``logging`` callable (class attributes are not in scope
# inside methods).  Presumably leftover; confirm before removing.
logging = None
@staticmethod
def __enum_equipment_id_type(id_type):
"""设备编号类型"""
type_dict = {
0: "PDID",
1: "VIN",
2: "IMEI",
3: "ICCID",
}
return type_dict[id_type]
@staticmethod
def __enum_msg_type(msg_type):
"""消息类型"""
type_dict = {
0: "TYPE0",
1: "TYPE1",
2: "REGISTER_REQUEST",
3: "REGISTER_RESPONSE",
4: "LOGIN",
5: "LOGIN_RESPONSE",
6: "HEART_BEAT_RESPONSE",
7: "REMOTE_CONFIG_RESPONSE",
8: "REMOTE_CONFIG_REQUEST",
9: "REMOTE_CONFIG_RESULT",
10: "REMOTE_CONTROL_CMD",
11: "REMOTE_CONTROL_RESPONSE",
12: "OTA_CMD",
13: "OTA_CMD_RESPONSE",
14: "OTA_CMD_CHECK_REQUEST",
15: "OTA_CMD_CHECK_RESPONSE",
16: "OTA_RESULT",
17: "OTA_RESULT_RESPONSE",
18: "REMOTE_DIAGNOSIS_RESPONSE",
19: "REMOTE_DIAGNOSIS_RESULT",
20: "DATAMINING",
21: "VEHICLE_STATUS",
22: "ALARM_SIGNAL",
23: "ALARM_SIGNAL_RESPONSE",
24: "PUSH_MESSAGE",
25: "MOTOR_FIRE_SIGNAL",
26: "COMMON_ACK",
101: "HEART_BEAT",
102: "LOGOUT",
103: "REMOTE_CONFIG_QUERY_REQUEST",
104: "REMOTE_DIAGNOSIS_REQUEST",
105: "VEHICLE_STATUS_REQUEST",
}
return type_dict[msg_type]
@staticmethod
def __enum_common_ack_code(code):
"""通用的消息回复码"""
code_dict = {
0: "SUCCESS",
1: "FAILED",
2: "NOT_LOGIN",
3: "MESSAGE_PARSE_ERROR",
}
return code_dict[code]
@staticmethod
def __enum_remote_config_item(item):
"""远程配置项枚举"""
item_dict = {
0: "MQTT_SERVER_ADDR",
1: "MQTT_SERVER_TOPIC",
2: "MQTT_KEY_BUSINESS_SERVER_ADDR",
3: "MQTT_KEY_BUSINESS_SERVER_TOPIC",
4: "ECALL_NUMBER",
5: "BCALL_NUMBER",
6: "ICALL_NUMBER",
7: "ECALL_ENABLE",
8: "BCALL_ENABLE",
9: "ICALL_ENABLE",
10: "SMS_GATE_NUMBER_UPLOAD",
11: "SMS_GATE_NUMBER_DOWNLOAD",
12: "DATAMINING_UPLOAD_FREQUENCY",
13: "VEHICLE_STATUS_UPLOAD_FREQUENCY",
14: "IGNITION_BLOWOUT_UPLOAD_ENABLE",
15: "UPLOAD_ALERT_ENABLE",
16: "DATAMING_ENABLE",
17: "SVT_ENABLE",
18: "ELETRONIC_DEFENSE_ENABLE",
19: "ABNORMAL_MOVE_THRESHOLD_VALUE",
}
return item_dict[item]
@staticmethod
def __enum_remote_config_error_code(code):
"""远程配置错误码"""
code_dict = {
0: "UNKNOW",
}
return code_dict[code]
@staticmethod
def __enum_remote_control_cmd_type(cmd_type):
"""远程控制命令类型"""
type_dict = {
0: "ENGINE",
1: "AIR_CONDITION_CTRL",
2: "LOCK",
3: "FIND_VEHICLE",
}
return type_dict[cmd_type]
@staticmethod
def __enum_remote_control_execute_result(result):
"""远程控制执行结果"""
result_dict = {
0: "FAILED",
1: "SUCCESS",
}
return result_dict[result]
@staticmethod
def __enum_ota_cmd_result_code(code):
"""OTA升级命令执行结果码"""
code_dict = {
0: "UPGRADE_FAILED",
1: "UPGRADE_SUCCESSED",
2: "DOWNLOAD_FILE_FAILED",
3: "OTA_IN_PROCESS",
}
return code_dict[code]
@staticmethod
def __enum_peps_power_mode(mode):
"""PEPS电源模式"""
mode_dict = {
0: "DEFAULT",
1: "OFF",
2: "ACC",
3: "ON",
4: "START",
5: "INVALID"
}
return mode_dict[mode]
@staticmethod
def __enum_crash_info(info):
"""碰撞信息"""
info_dict = {
0: "NONE_CRASH",
1: "UNKNOWN_CRASH",
2: "HEAD_CRASH",
3: "LEFT_SIDE_CRASH",
4: "RIGHT_SIDE_CRASH",
5: "TAIL_CRASH",
6: "PEDESTRIAN",
7: "MUTI_CRASH",
}
return info_dict[info]
@staticmethod
def __enum_common_true_false_unknown(msg):
"""通用TRUE FALSE UNKNOWN"""
msg_dict = {
0: "FALSE",
1: "TRUE",
2: "UNKNOWN",
}
return msg_dict[msg]
@staticmethod
def __enum_alarm_signal_type(signal_type):
"""报警信号信息"""
type_dict = {
0: "AIR_BAG",
1: "SIDE_TURN",
2: "UNUSUAL_MOVE",
3: "ANTI_THEFT",
4: "VEHICLE_CRASH",
}
return type_dict[signal_type]
@staticmethod
def __enum_on_off_state(state):
"""on/off状态"""
state_dict = {
0: "UNKNOWN",
1: "OFF",
2: "ON"
}
return state_dict[state]
@staticmethod
def __enum_engine_state(state):
"""引擎状态"""
state_dict = {
0: "UNKNOWN",
1: "KEYOFF",
2: "KEYON",
3: "CRANK",
4: "RUNNING"
}
return state_dict[state]
@staticmethod
def __enum_gear_position(pos):
"""变速箱档位"""
pos_dict = {
0: "P",
1: "R",
2: "N",
3: "D",
4: "MANUAL_1",
5: "MANUAL_2",
6: "MANUAL_3",
7: "MANUAL_4",
8: "MANUAL_5",
9: "MANUAL_6",
10: "MANUAL_7",
11: "MANUAL_8",
12: "S",
13: "UNKNOW",
14: "Z1",
15: "Z2",
16: "Z3",
17: "Invalid"
}
return pos_dict[pos]
@staticmethod
def __enum_motor_fire_mode(mode):
"""点火熄火状态"""
mode_dict = {
0: "IGNITION",
1: "FLAMEOUT",
}
return mode_dict[mode]
@staticmethod
def __show_common_ack(ack_code):
msg = "{" \
+ "ack_code:" + MqttDump.__enum_common_ack_code(ack_code.ack_code) \
+ ", code_desp:" + ack_code.code_desp \
+ "}"
return msg
@staticmethod
def __show_gps_info(gps_info):
msg = "{" \
+ "longtitude:" + str(gps_info.longtitude) \
+ ", latitude:" + str(gps_info.latitude) \
+ ", altitude:" + str(gps_info.altitude) \
+ ", gps_heading:" + str(gps_info.gps_heading) \
+ ", gps_speed:" + str(gps_info.gps_speed) \
+ ", satellite_number:" + str(gps_info.satellite_number) \
+ ", valid:" + str(gps_info.valid) \
+ "}"
return msg
@staticmethod
def __show_remote_config_data(remote_config_data):
msg = "{" \
+ "mqtt_server_addr:" + remote_config_data.mqtt_server_addr \
+ ", mqtt_server_topic:" + remote_config_data.mqtt_server_topic \
+ ", mqtt_key_business_server_addr:" + remote_config_data.mqtt_key_business_server_addr \
+ ", mqtt_key_business_server_topic:" + remote_config_data.mqtt_key_business_server_topic \
+ ", ecall_number:" + remote_config_data.ecall_number \
+ ", bcall_number:" + remote_config_data.bcall_number \
+ ", icall_number:" + remote_config_data.icall_number \
+ ", ecall_enable:" + str(remote_config_data.ecall_enable) \
+ ", bcall_enable:" + str(remote_config_data.bcall_enable) \
+ ", icall_enable:" + str(remote_config_data.icall_enable) \
+ ", sms_gate_number_upload:" + remote_config_data.sms_gate_number_upload \
+ ", sms_gate_number_download:" + remote_config_data.sms_gate_number_download \
+ ", datamining_upload_frequency:" + str(remote_config_data.datamining_upload_frequency) \
+ ", vehicle_status_upload_frequency:" + str(remote_config_data.vehicle_status_upload_frequency) \
+ ", ignition_blowout_upload_enable:" + str(remote_config_data.ignition_blowout_upload_enable) \
+ ", upload_alert_enable:" + str(remote_config_data.upload_alert_enable) \
+ ", datamining_enable:" + str(remote_config_data.datamining_enable) \
+ ", svt_enable:" + str(remote_config_data.svt_enable) \
+ ", eletronic_defense_enable:" + str(remote_config_data.eletronic_defense_enable) \
+ ", abnormal_move_threshold_value:" + str(remote_config_data.abnormal_move_threshold_value) \
+ "}"
return msg
@staticmethod
def __show_remote_config_result(config_results):
msg = "{" \
+ "config_item:" + MqttDump.__enum_remote_config_item(config_results.config_item) \
+ ", result:" + str(config_results.result) \
+ ", error_code:" + MqttDump.__enum_remote_config_error_code(config_results.error_code) \
+ "}"
return msg
@staticmethod
def __show_air_condition_control_parameter(ac_parameter):
msg = "{" \
+ "ac_switch:" + str(ac_parameter.ac_switch) \
+ ", ac_temperature:" + str(ac_parameter.ac_temperature) \
+ ", ac_front_defrost:" + str(ac_parameter.ac_front_defrost) \
+ ", ac_rear_defrost:" + str(ac_parameter.ac_rear_defrost) \
+ "}"
return msg
@staticmethod
def __show_remote_control_response_vehice_info(vehicle_info):
msg = "{" \
+ "air_condition_status:" + MqttDump.__enum_on_off_state(vehicle_info.air_condition_status) \
+ ", air_condition_defrost_status:" + MqttDump.__enum_on_off_state(vehicle_info.air_condition_defrost_status) \
+ ", air_condition_rear_defrost_status" + MqttDump.__enum_on_off_state(vehicle_info.air_condition_rear_defrost_status) \
+ ", air_condition_temperature:" + str(vehicle_info.air_condition_temperature) \
+ ", lock_status:" + MqttDump.__enum_on_off_state(vehicle_info.lock_status) \
+ ", engine_status:" + MqttDump.__enum_engine_state(vehicle_info.engine_status) \
+ ", hand_brake_status:" + MqttDump.__enum_on_off_state(vehicle_info.hand_break_status) \
+ ", peps_power_mode:" + MqttDump.__enum_peps_power_mode(vehicle_info.peps_power_mode) \
+ ", gear_position:" + MqttDump.__enum_gear_position(vehicle_info.gear_position) \
+ "}"
return msg
@staticmethod
def __show_diagnosis_result(result):
msg = "{" \
+ "ecu_id:" + str(result.ecu_id) \
+ ", dtcs:" + str(result.dtcs) \
+ "}"
return msg
@staticmethod
def __show_gsensor_value(value):
    # Render G-sensor axes.  NOTE(review): unlike the sibling renderers,
    # x/y/z are concatenated WITHOUT str(), so non-string axis values would
    # raise TypeError here — presumably the upstream fields are strings;
    # confirm before changing.
    msg = "{" \
        + "x:" + value.x \
        + ", y:" + value.y \
        + ", z:" + value.z \
        + "}"
    return msg
@staticmethod
def __list_common_head(common_head):
"""通用头,必填字段"""
logging("====> CommonHead <====")
logging("protocol_version: " + str(common_head.protocol_version))
logging("equipment_id_type: " + MqttDump.__enum_equipment_id_type(common_head.equipment_id_type))
logging("equipment_id: " + common_head.equipment_id)
logging("message_id: " + str(common_head.message_id))
logging("msg_type: " + MqttDump.__enum_msg_type(common_head.msg_type))
logging("message_create_time: " + str(datetime.fromtimestamp(common_head.message_create_time)))
logging("token: " + common_head.token)
logging("flag: " + str(common_head.flag))
@staticmethod
def __list_extra_common_head(common_head):
    """Common header, mandatory fields (stdout variant).

    NOTE(review): this variant writes via ``print`` instead of the Robot
    console ``logging`` callable used by the other dumpers — presumably
    intentional for the "extra" flavour; confirm.
    """
    print("====> CommonHead <====")
    print("protocol_version: " + str(common_head.protocol_version))
    print("equipment_id_type: " + MqttDump.__enum_equipment_id_type(common_head.equipment_id_type))
    print("equipment_id: " + common_head.equipment_id)
    print("message_id: " + str(common_head.message_id))
    print("msg_type: " + MqttDump.__enum_msg_type(common_head.msg_type))
    print("message_create_time: " + str(datetime.fromtimestamp(common_head.message_create_time)))
    print("token: " + common_head.token)
    print("flag: " + str(common_head.flag))
@staticmethod
def __list_msg_register_request(register_request):
"""注册请求消息(在工厂模式下) 01"""
logging("====> MsgRegisterRequest <====")
logging("pdid: " + register_request.pdid)
logging("iccid: " + register_request.iccid)
logging("tbox_version: " + register_request.tbox_version)
@staticmethod
def __list_msg_register_response(register_response):
"""注册请求应答(在工厂模式下) 02"""
logging("====> MsgRegisterResponse <====")
logging("res_code: " + str(register_response.res_code))
logging("addr: " + register_response.addr)
logging("ca_cer: " + register_response.ca_cer)
logging("custom_cer: " + register_response.custom_cer)
@staticmethod
def __list_msg_login(login):
"""登录请求 03"""
logging("====> MsgLogIn <====")
logging("pdid: " + login.pdid)
logging("iccid: " + login.iccid)
logging("vin: " + login.vin)
logging("version: " + login.version)
logging("release_tag: " + login.release_tag)
@staticmethod
def __list_msg_log_in_response(login_response):
"""登录回复 04"""
logging("====> MsgLogInResponse <====")
logging("ack_code: " + MqttDump.__show_common_ack(login_response.ack_code))
logging("token: " + login_response.token)
# Link check (heartbeat) 05 — no message body
@staticmethod
def __list_msg_heart_beat_response(heart_beat_response):
"""链路检测回复 06"""
logging("====> MsgHeartBeatResponse <====")
logging("ack_code: " + MqttDump.__show_common_ack(heart_beat_response.ack_code))
# Logout 07 — no message body
# Remote config query request 08 — no message body
@staticmethod
def __list_msg_remote_config_response(remote_config_response):
    """Log a RemoteConfigResponse (config query reply), message type 09."""
    r = remote_config_response
    for line in (
        "====> MsgRemoteConfigResponse <====",
        "ack_code: " + MqttDump.__show_common_ack(r.ack_code),
        "remote_config_data: " + MqttDump.__show_remote_config_data(r.remote_config_data),
    ):
        logging(line)
@staticmethod
def __list_msg_remote_config_request(remote_config_request):
    """Remote config request message, msg type 10."""
    logging("====> MsgRemoteConfigRequest <====")
    # one line per requested configuration item
    for item in remote_config_request.config_items:
        logging(f"config_items: {MqttDump.__enum_remote_config_item(item)}")
    logging(f"config_data: {MqttDump.__show_remote_config_data(remote_config_request.config_data)}")
@staticmethod
def __list_msg_remote_config_result(remote_config_result):
    """Remote config result message, msg type 11."""
    print("====> MsgRemoteConfigResult <====")
    print(f"ack_code: {MqttDump.__show_common_ack(remote_config_result.ack_code)}")
    # one line per individual configuration result
    for result in remote_config_result.config_results:
        print(f"config_results: {MqttDump.__show_remote_config_result(result)}")
    print(f"config_old: {MqttDump.__show_remote_config_data(remote_config_result.config_old)}")
    print(f"config_new: {MqttDump.__show_remote_config_data(remote_config_result.config_new)}")
@staticmethod
def __list_msg_remote_control_cmd(remote_control_cmd):
    """Remote control command message, msg type 12."""
    logging("====> MsgRemoteControlCmd <====")
    logging(f"cmd: {MqttDump.__enum_remote_control_cmd_type(remote_control_cmd.cmd)}")
    logging(f"ac_parameter: {MqttDump.__show_air_condition_control_parameter(remote_control_cmd.ac_parameter)}")
    logging(f"engine_parameter: {remote_control_cmd.engine_parameter}")
    logging(f"lock_parameter: {remote_control_cmd.lock_parameter}")
@staticmethod
def __list_msg_remote_control_response(remote_control_response):
    """Remote control result message, msg type 13."""
    print("====> MsgRemoteControlResponse <====")
    print(f"ack_code: {MqttDump.__show_common_ack(remote_control_response.ack_code)}")
    # field name 'excute_result' (sic) comes from the message definition
    print(f"excute_result: {MqttDump.__enum_remote_control_execute_result(remote_control_response.excute_result)}")
    print(f"error_code: {remote_control_response.error_code}")
    print(f"gps_info: {MqttDump.__show_gps_info(remote_control_response.gps_info)}")
    print(f"vehicle_info: {MqttDump.__show_remote_control_response_vehice_info(remote_control_response.vehicle_info)}")
@staticmethod
def __list_msg_ota_cmd(ota_cmd):
    """OTA upgrade command message, msg type 14."""
    logging("====> MsgOtaCmd <====")
    logging(f"update_target_version: {ota_cmd.update_target_version}")
    logging(f"upgrade_file_download_addr: {ota_cmd.upgrade_file_download_addr}")
    logging(f"ota_task_id: {ota_cmd.ota_task_id}")
@staticmethod
def __list_msg_ota_cmd_response(ota_cmd_response):
    """OTA upgrade command acknowledgement, msg type 15."""
    logging("====> MsgOtaCmdResponse <====")
    logging(f"ack_code: {MqttDump.__show_common_ack(ota_cmd_response.ack_code)}")
    logging(f"ota_task_id: {ota_cmd_response.ota_task_id}")
@staticmethod
def __list_msg_ota_cmd_checksum_request(ota_cmd_check_request):
    """OTA upgrade file checksum verification request, msg type 16."""
    logging("====> MsgOtaCmdCheckSumRequest <====")
    logging(f"check_sum_code: {ota_cmd_check_request.check_sum_code}")
    logging(f"upgrade_file_download_addr: {ota_cmd_check_request.upgrade_file_download_addr}")
    logging(f"ota_task_id: {ota_cmd_check_request.ota_task_id}")
@staticmethod
def __list_msg_ota_cmd_checksum_response(ota_cmd_check_response):
    """OTA upgrade file checksum verification response, msg type 17."""
    logging("====> MsgOtaCmdCheckSumResponse <====")
    logging(f"ack_code: {MqttDump.__show_common_ack(ota_cmd_check_response.ack_code)}")
    logging(f"check_sum_result: {ota_cmd_check_response.check_sum_result}")
    logging(f"ota_task_id: {ota_cmd_check_response.ota_task_id}")
@staticmethod
def __list_msg_ota_result(ota_result):
    """OTA upgrade result message, msg type 18."""
    logging("====> MsgOtaResult <====")
    logging(f"before_upgrade_version: {ota_result.before_upgrade_version}")
    # field name 'after_upgread_version' (sic) comes from the message definition
    logging(f"after_upgread_version: {ota_result.after_upgread_version}")
    logging(f"result: {MqttDump.__enum_ota_cmd_result_code(ota_result.result)}")
    logging(f"upgrade_time: {ota_result.upgrade_time}")
    logging(f"ota_task_id: {ota_result.ota_task_id}")
@staticmethod
def __list_msg_ota_result_response(ota_result_response):
    """OTA upgrade result acknowledgement, msg type 19."""
    logging("====> MsgOtaResultResponse <====")
    logging(f"ack_code: {MqttDump.__show_common_ack(ota_result_response.ack_code)}")
    logging(f"ota_task_id: {ota_result_response.ota_task_id}")
# Remote diagnosis command dispatch 20 — no message body
@staticmethod
def __list_msg_remote_diagnosis_response(remote_diagnosis_response):
"""远程诊断命令收到回复 21"""
logging("====> MsgRemoteDiagnosisResponse <====")
logging("ack_code: " + | |
full_json_list = []
remaining_content = True
while (remaining_content):
partial_json_list = self.list_rfps(start_date=start_date, end_date=end_date, page_size=page_size, page=page)
full_json_list.extend(partial_json_list)
page = page + 1
remaining_content = (len(partial_json_list) == page_size)
return full_json_list
def list_rfps_for_campaign(self, agency_group_id=None, agency_id=None, campaign_id=None):
    """
    As a buyer, view all RFPs sent against a given campaign ID.
    https://developer.mediaocean.com/docs/read/buyer_proposals/List_RFPs
    """
    if campaign_id is None:
        raise PATSException("Campaign ID is required")
    # fall back to the instance defaults for any identity not supplied
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.rfp-v1+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': self.user_id,
        'X-MO-App': 'prisma'
    }
    return self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        '/campaigns/%s/rfps?' % campaign_id,
        extra_headers
    )
def view_rfp_detail(self, agency_group_id=None, agency_id=None, user_id=None, rfp_id=None):
    """
    Get a single RFP using its public ID.
    http://developer.mediaocean.com/docs/read/rfp_api/Get_rfp_by_publicid

    :param rfp_id: public ID of the RFP (required).
    :raises PATSException: when rfp_id is not supplied.
    """
    if rfp_id is None:
        raise PATSException("RFP ID is required")
    # fall back to the instance defaults for any identity not supplied
    # (fixed "== None" to the idiomatic identity check "is None")
    if user_id is None:
        user_id = self.user_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.rfp-v1+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': user_id,
        'X-MO-App': 'prisma'
    }
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        "/rfps/%s" % rfp_id,
        extra_headers
    )
    return js
def get_rfp_attachment(self, user_id=None, agency_id=None, agency_group_id=None, rfp_id=None, attachment_id=None):
    """
    Get an attachment from an RFP.
    http://developer.mediaocean.com/docs/read/rfp_api/Get_rfp_attachment_by_publicid

    :param rfp_id: public ID of the RFP (required).
    :param attachment_id: ID of the attachment on that RFP (required).
    :raises PATSException: when either required ID is missing.
    """
    if rfp_id is None:
        raise PATSException("RFP ID is required")
    if attachment_id is None:
        raise PATSException("Attachment ID is required")
    # fall back to the instance defaults for any identity not supplied
    # (fixed "== None" to the idiomatic identity check "is None")
    if user_id is None:
        user_id = self.user_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.rfp-v1+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': user_id,
        'X-MO-App': 'prisma'
    }
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        "/rfps/%s/attachments/%s" % (rfp_id, attachment_id),
        extra_headers
    )
    return js
def search_rfps(self, agency_group_id=None, agency_id=None, user_id=None, advertiser_name=None, campaign_urn=None, rfp_start_date=None, rfp_end_date=None, response_due_date=None, status=None):
    """
    Search for RFPs by advertiser name, campaign ID, RFP dates, response due
    date and/or status.
    http://developer.mediaocean.com/docs/rfp_api/Search_for_rfps

    All filter parameters are optional; omitted ones are not sent.
    """
    if user_id is None:
        user_id = self.user_id
    if agency_id is None:
        agency_id = self.agency_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.rfp-v1+json',
        'X-MO-Organization-Id': agency_id,
        'X-MO-Agency-Group-Id': agency_group_id,
        'X-MO-User-Id': user_id,
        'X-MO-App': 'prisma'
    }
    # Collect only the filters that were supplied and URL-encode them in one
    # go. The previous hand-built query string neither escaped values
    # (e.g. spaces in advertiser names) nor trimmed the trailing '&'.
    filters = [
        ('advertiserName', advertiser_name),
        ('campaignUrn', campaign_urn),
        ('startDate', rfp_start_date),
        ('endDate', rfp_end_date),
        ('responseDueDate', response_due_date),
        ('status', status),
    ]
    params = {name: value for name, value in filters if value}
    path = '/rfps'
    if params:
        path += '?' + urlencode(params)
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        path,
        extra_headers
    )
    return js
def list_proposals(self, agency_group_id=None, agency_id=None, rfp_id=None, start_date=None, end_date=None, page=None):
    """
    As a buyer, view all proposals I have sent and their status.
    https://developer.mediaocean.com/docs/buyer_proposals/List_proposals
    https://developer.mediaocean.com/docs/buyer_proposals/Find_proposals

    rfp_id is optional: proposals may now be seller-initiated.
    """
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.proposal-v2+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': self.user_id,
        'X-MO-App': 'prisma'
    }
    if rfp_id:
        # proposals attached to a specific RFP support a date-range filter
        path = f'/rfps/{rfp_id}/proposals?'
        if start_date:
            path += f"startDate={start_date.strftime('%Y-%m-%d')}"
        if end_date:
            path += f"&endDate={end_date.strftime('%Y-%m-%d')}"
    else:
        # seller-initiated proposals: 'since' filter plus paging
        path = '/proposals?'
        if start_date:
            path += f"since={start_date.strftime('%Y-%m-%d')}"
        if page:
            path += f"&page={page}"
    return self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        path,
        extra_headers
    )
def list_all_proposals(self, agency_group_id=None, agency_id=None, rfp_id=None, start_date=None, end_date=None):
    """
    Page through list_proposals until every proposal has been collected.
    """
    # NOTE(review): assumes the server's page size is 25 (list_proposals does
    # not let us set it) — a short page is taken to mean the final page; confirm.
    page_size = 25
    results = []
    page = 1
    while True:
        batch = self.list_proposals(
            agency_group_id=agency_group_id, agency_id=agency_id, rfp_id=rfp_id,
            start_date=start_date, end_date=end_date, page=page
        )
        results.extend(batch)
        if len(batch) != page_size:
            return results
        page += 1
def view_proposal_detail(self, agency_group_id=None, agency_id=None, user_id=None, proposal_id=None):
    """
    Get a single proposal using its public ID.
    https://developer.mediaocean.com/docs/read/buyer_proposals/Get_proposal_details

    :param proposal_id: public ID of the proposal (required).
    :raises PATSException: when proposal_id is not supplied.
    """
    if proposal_id is None:
        raise PATSException("Proposal ID is required")
    # fall back to the instance defaults for any identity not supplied
    # (fixed "== None" to the idiomatic identity check "is None")
    if user_id is None:
        user_id = self.user_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.proposal-v1+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': user_id,
        'X-MO-App': 'prisma'
    }
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        "/proposals/%s" % proposal_id,
        extra_headers
    )
    return js
def get_proposal_attachment(self, user_id=None, agency_id=None, agency_group_id=None, proposal_id=None, attachment_id=None):
    """
    Get the contents of a proposal attachment by proposal ID.
    http://developer.mediaocean.com/docs/read/rfp_api/Get_proposal_attachment_by_publicid
    """
    # validate required identifiers up front
    if proposal_id is None:
        raise PATSException("Proposal ID is required")
    if attachment_id is None:
        raise PATSException("Attachment ID is required")
    # fall back to the instance defaults for any identity not supplied
    if user_id is None:
        user_id = self.user_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.proposal-v1+json',
        'X-MO-Organization-ID': agency_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-User-Id': user_id,
        'X-MO-App': 'prisma'
    }
    return self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        "/proposals/%s/attachments/%s" % (proposal_id, attachment_id),
        extra_headers
    )
def return_proposal(self, agency_group_id=None, agency_id=None, user_id=None,
                    proposal_id=None, comments=None, due_date=None, emails=None,
                    attachments=None):
    """
    "Return a proposal": send a comment back to the seller that sent this
    proposal.
    https://developer.mediaocean.com/docs/read/buyer_proposals/Return_proposal

    :param proposal_id: public ID of the proposal (required).
    :param due_date: optional datetime/date; serialised as YYYY-MM-DD.
    :raises PATSException: when proposal_id is not supplied.
    """
    # validate required input before doing any other work
    if proposal_id is None:
        raise PATSException("Proposal ID is required")
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    if user_id is None:
        user_id = self.user_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.proposal-v1+json',
        'X-MO-User-Id': user_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-Organization-ID': agency_id,
        'X-MO-App': 'prisma'
    }
    data = {
        'comments': comments,
        'emails': emails
    }
    # Bug fix: due_date.strftime() was previously called unconditionally,
    # raising AttributeError when no due date was supplied (its default is
    # None). Only serialise it when present.
    if due_date is not None:
        data['dueDate'] = due_date.strftime("%Y-%m-%d")
    if attachments:
        data['attachments'] = attachments
    js = self._send_request(
        "POST",
        AGENCY_API_DOMAIN,
        "/proposals/%s/return" % proposal_id,
        extra_headers,
        json.dumps(data)
    )
    return js
def link_proposal_to_campaign(self, agency_group_id=None, agency_id=None,
                              user_id=None, proposal_id=None, campaign_id=None):
    """
    New in 2017.1 - link a (seller-initiated) proposal to a campaign.
    https://developer.mediaocean.com/docs/read/buyer_proposals/Link_sip_to_campaign

    The workflow is:
    - Seller creates and sends a new proposal
    - Buyer and seller might go back and forth a few times
    - Buyer creates a campaign
    - Buyer links proposal to campaign
    - Buyer sends order on the campaign
    - Seller accepts order.

    :raises PATSException: when campaign_id or proposal_id is missing.
    """
    if campaign_id is None:
        raise PATSException("Campaign ID is required")
    # Bug fix: proposal_id is interpolated into the request path but was
    # never validated (unlike in every sibling method); a missing value
    # produced a bogus '/proposals/None' request instead of a clear error.
    if proposal_id is None:
        raise PATSException("Proposal ID is required")
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    if user_id is None:
        user_id = self.user_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.proposal-v2+json',
        'X-MO-User-Id': user_id,
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-Organization-ID': agency_id,
        'X-MO-App': 'prisma'
    }
    path = '/proposals/%s' % proposal_id
    path += '?operation=link&campaignId=%s' % campaign_id
    js = self._send_request(
        "PUT",
        AGENCY_API_DOMAIN,
        path,
        extra_headers,
        None  # no payload
    )
    return js
def list_products(self, user_id=None, agency_id=None, agency_group_id=None, vendor_id=None):
    """
    New in 2016.4 - get a publisher's products. Only shows products that the
    vendor has chosen to share with this agency.
    https://developer.mediaocean.com/docs/buyer_catalog/Get_products_buyer

    :param vendor_id: ID of the vendor whose catalogue is requested (required).
    :raises PATSException: when vendor_id is not supplied.
    """
    if vendor_id is None:
        raise PATSException("Vendor id is required")
    # fall back to the instance defaults for any identity not supplied
    # (fixed "== None" to the idiomatic identity check "is None")
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if agency_id is None:
        agency_id = self.agency_id
    if user_id is None:
        user_id = self.user_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.catalog-v1+json',
        'X-MO-User-ID': user_id,
        # Bug fix: these headers previously read self.agency_group_id and
        # self.agency_id directly, silently ignoring explicitly passed
        # agency_group_id / agency_id arguments.
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-Organization-ID': agency_id,
        'X-MO-App': 'prisma'
    }
    path = '/vendors/%s/products' % vendor_id
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        path,
        extra_headers
    )
    return js
def old_list_products(self, vendor_id=None, user_id=None, start_index=None, max_results=None, include_logo=False):
    """
    List products in a vendor's product catalogue.

    vendor_id (required): vendor whose catalogue is requested.
    start_index / max_results (optional): paging controls.
    include_logo (optional): also return product logos.
    http://developer.mediaocean.com/docs/read/catalog_api/List_catalog_products
    """
    if vendor_id is None:
        raise PATSException("Vendor ID is required")
    if user_id is None:
        user_id = self.user_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.catalog-v1+json',
        'X-MO-User-Id': user_id
    }
    # only include the paging/logo options that were actually supplied
    options = [
        ('start_index', start_index),
        ('max_results', max_results),
        ('include_logo', include_logo),
    ]
    params = urlencode({name: value for name, value in options if value})
    # result looks like
    # {"total":117,"products":[{"vendorPublicId":"35-EEBMG4J-4","productPublicId":"PC-11TU", ... }
    return self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        "/agencies/%s/vendors/%s/products/?%s" % (self.agency_id, vendor_id, params),
        extra_headers
    )
def get_media_property_details(self, user_id=None, agency_group_id=None, agency_id=None, organisation_id=None):
    """
    List a vendor's media property fields and field restrictions.
    https://developer.mediaocean.com/docs/buyer_catalog/Get_media_property_details_buyer

    :param organisation_id: vendor organisation whose fields are requested
        (required).
    :raises PATSException: when organisation_id is not supplied.
    """
    # Bug fix: organisation_id is interpolated into the path but was never
    # validated; a missing value produced a bogus '/vendors/None/...' request.
    if organisation_id is None:
        raise PATSException("Organisation ID is required")
    # fall back to the instance defaults for any identity not supplied
    # (fixed "== None" to the idiomatic identity check "is None")
    if agency_id is None:
        agency_id = self.agency_id
    if agency_group_id is None:
        agency_group_id = self.agency_group_id
    if user_id is None:
        user_id = self.user_id
    extra_headers = {
        'Accept': 'application/vnd.mediaocean.catalog-v1+json',
        'X-MO-User-ID': user_id,
        # Bug fix: these headers previously read self.agency_group_id and
        # self.agency_id directly, silently ignoring explicitly passed
        # agency_group_id / agency_id arguments.
        'X-MO-Agency-Group-ID': agency_group_id,
        'X-MO-Organization-ID': agency_id,
        'X-MO-App': 'prisma'
    }
    path = '/vendors/%s/mediaproperties/fields' % organisation_id
    js = self._send_request(
        "GET",
        AGENCY_API_DOMAIN,
        path,
        extra_headers
    )
    return js
def send_order(self, agency_id=None, agency_group_id=None, user_id=None, campaign_id=None, media_type=None, barter_detail=None, currency_code=None, external_order_id=None, vendor_id=None, recipient_emails=None, buyer_dict=None, notify_emails=None, additional_info=None, order_comment=None, respond_by_date=None, terms_and_conditions_name=None, terms_and_conditions_content=None, | |
# Repository: salv-orlando/vmware-nsxlib
# Copyright 2016 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import inspect
import re
from threading import Lock
import time
from oslo_log import log
import tenacity
from tenacity import _utils as tenacity_utils
from vmware_nsxlib._i18n import _
from vmware_nsxlib.v3 import constants
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import nsx_constants
LOG = log.getLogger(__name__)
TagLimits = collections.namedtuple('TagLimits',
['scope_length', 'tag_length', 'max_tags'])
# These defaults reflect latest tag & scope limits on the backend. As of 2.5,
# backend no longer returns tag limit via API.
MAX_RESOURCE_TYPE_LEN = 128
MAX_TAG_LEN = 256
MAX_TAGS = 15
MAX_NSGROUPS_CRITERIA_TAGS = 10
DEFAULT_MAX_ATTEMPTS = 10
DEFAULT_CACHE_AGE_SEC = 600
INJECT_HEADERS_CALLBACK = None
IS_ATTR_SET_CALLBACK = None
def set_is_attr_callback(callback):
    """Install the hook used by is_attr_set() to decide attribute set-ness."""
    global IS_ATTR_SET_CALLBACK
    IS_ATTR_SET_CALLBACK = callback
def is_attr_set(attr):
    """Return True when *attr* carries a value.

    Delegates to the installed callback when one is set; otherwise any
    non-None value counts as "set".
    """
    callback = IS_ATTR_SET_CALLBACK
    return callback(attr) if callback else (attr is not None)
def set_inject_headers_callback(callback):
    """Install the hook used to inject extra HTTP headers into requests."""
    global INJECT_HEADERS_CALLBACK
    INJECT_HEADERS_CALLBACK = callback
def censor_headers(headers):
    """Return a copy of *headers* with sensitive values masked.

    Header-name matching is case-insensitive; values of authorization,
    XSRF-token and cookie headers are replaced with a placeholder.
    """
    sensitive = {'authorization', 'x-xsrf-token', 'cookie'}
    return {
        name: ('--- CENSORED ---' if name.lower() in sensitive else value)
        for name, value in headers.items()
    }
def _update_resource_length(length):
    """Adopt the backend-advertised maximum tag-scope/resource-type length."""
    global MAX_RESOURCE_TYPE_LEN
    MAX_RESOURCE_TYPE_LEN = length
def _update_tag_length(length):
    """Adopt the backend-advertised maximum tag value length."""
    global MAX_TAG_LEN
    MAX_TAG_LEN = length
def _update_max_tags(max_tags):
    """Adopt the backend-advertised maximum number of tags per resource."""
    global MAX_TAGS
    MAX_TAGS = max_tags
def _update_max_nsgroups_criteria_tags(max_tags):
    """Derive the NSGroup criteria tag limit from the overall tag limit.

    Reserves 5 tags of headroom below the backend limit, but never drops
    below a floor of 10.
    """
    global MAX_NSGROUPS_CRITERIA_TAGS
    MAX_NSGROUPS_CRITERIA_TAGS = max(10, max_tags - 5)
def update_tag_limits(limits):
    """Apply a TagLimits namedtuple to all module-level tag limit globals."""
    _update_resource_length(limits.scope_length)
    _update_tag_length(limits.tag_length)
    _update_max_tags(limits.max_tags)
    _update_max_nsgroups_criteria_tags(limits.max_tags)
def _validate_resource_type_length(resource_type):
    """Raise NsxLibInvalidInput when the tag scope/resource type is too long."""
    if len(resource_type) <= MAX_RESOURCE_TYPE_LEN:
        return
    msg = (_('Resource type cannot exceed %(max_len)s '
             'characters: %(resource_type)s') %
           {'max_len': MAX_RESOURCE_TYPE_LEN,
            'resource_type': resource_type})
    raise nsxlib_exc.NsxLibInvalidInput(error_message=msg)
def add_v3_tag(tags, resource_type, tag):
    """Append a {scope, tag} entry to *tags* and return the (mutated) list.

    The scope length is validated; the tag value is truncated to MAX_TAG_LEN.
    """
    _validate_resource_type_length(resource_type)
    entry = {'scope': resource_type, 'tag': tag[:MAX_TAG_LEN]}
    tags.append(entry)
    return tags
def update_v3_tags(current_tags, tags_update):
    """Merge a tag update into the current tag list.

    Scopes present on only one side are kept as-is (entries with empty
    values are dropped); scopes present on both sides take the updated
    value, with an empty updated value meaning "remove the tag".
    """
    current_scopes = {t['scope'] for t in current_tags}
    updated_scopes = {t['scope'] for t in tags_update}
    # scopes appearing on exactly one side pass through unchanged
    one_sided = current_scopes ^ updated_scopes
    merged = [{'scope': t['scope'], 'tag': t['tag']}
              for t in (current_tags + tags_update)
              if t['tag'] and t['scope'] in one_sided]
    # scopes on both sides: the update wins (empty value -> deletion)
    common_scopes = current_scopes & updated_scopes
    for t in tags_update:
        if t['scope'] in common_scopes and t['tag']:
            t['tag'] = t['tag'][:MAX_TAG_LEN]
            merged.append(t)
    return merged
def _log_before_retry(retry_state):
    """Tenacity 'before' hook: warn ahead of every retry (not the first try)."""
    if retry_state.attempt_number <= 1:
        return
    LOG.warning("Retrying call to '%(func)s' for the %(num)s time",
                {'func': tenacity_utils.get_callback_name(retry_state.fn),
                 'num': tenacity_utils.to_ordinal(
                     retry_state.attempt_number)})
def _get_args_from_frame(frames, frame_num):
    """Format the argument values of stack frame *frame_num*.

    Returns the rendered argument string, or None when the requested frame
    is unavailable.
    """
    if not (len(frames) > frame_num and frames[frame_num]
            and frames[frame_num][0]):
        return None
    frame = frames[frame_num][0]
    formatted = inspect.formatargvalues(*inspect.getargvalues(frame))
    # drop the uninformative 'self' argument from the rendering
    return re.sub(r'\(self=.*?, ', "(", formatted)
def _log_after_retry(retry_state):
    """Tenacity 'after' hook: log a finished retry, including call args."""
    frames = inspect.trace()
    # frame #2 is usually the interesting caller (skips internal _do_X
    # wrappers); fall back to frame #1, then to a placeholder
    formated_args = (_get_args_from_frame(frames, 2) or
                     _get_args_from_frame(frames, 1) or
                     "Unknown")
    LOG.warning("Finished retry of %(func)s for the %(num)s time after "
                "%(time)0.3f(s) with args: %(args)s",
                {'func': tenacity_utils.get_callback_name(retry_state.fn),
                 'num': tenacity_utils.to_ordinal(retry_state.attempt_number),
                 'time': retry_state.seconds_since_start,
                 'args': formated_args})
def retry_upon_exception(exc, delay=0.5, max_delay=2,
                         max_attempts=DEFAULT_MAX_ATTEMPTS):
    """Build a retry decorator firing on *exc* with exponential backoff.

    *exc* may be a single exception type or a tuple of types.
    """
    wait = tenacity.wait_exponential(multiplier=delay, max=max_delay)
    return tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_exception_type(exc),
        wait=wait,
        stop=tenacity.stop_after_attempt(max_attempts),
        before=_log_before_retry,
        after=_log_after_retry,
    )
def retry_random_upon_exception(exc, delay=0.5, max_delay=5,
                                max_attempts=DEFAULT_MAX_ATTEMPTS):
    """Build a retry decorator firing on *exc* with randomized backoff."""
    wait = tenacity.wait_random_exponential(multiplier=delay, max=max_delay)
    return tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_exception_type(exc),
        wait=wait,
        stop=tenacity.stop_after_attempt(max_attempts),
        before=_log_before_retry,
        after=_log_after_retry,
    )
def retry_upon_none_result(max_attempts, delay=0.5, max_delay=10,
                           random=False):
    """Build a retry decorator firing while the wrapped call returns None.

    With random=True a randomized exponential backoff is used instead of a
    deterministic one.
    """
    wait_cls = (tenacity.wait_random_exponential if random
                else tenacity.wait_exponential)
    return tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_result(lambda result: result is None),
        wait=wait_cls(multiplier=delay, max=max_delay),
        stop=tenacity.stop_after_attempt(max_attempts),
        before=_log_before_retry,
        after=_log_after_retry,
    )
class RetryAttemptsExceeded(tenacity.RetryError):
    """RetryError whose reraise() raises the last attempt's returned exception."""

    def reraise(self):
        raise self.last_attempt.result()
# Retry when the decorated function *returns* an exception instance.
# If the retry attempts are exceeded, re-raise that last exception; this is
# achieved by overriding the reraise() method of RetryAttemptsExceeded.
def retry_random_upon_exception_result(max_attempts, delay=0.5, max_delay=10):
    """Build a retry decorator firing when the call *returns* an Exception.

    Exhausted retries raise RetryAttemptsExceeded, whose reraise() raises
    the last returned exception instead of a generic RetryError.
    """
    wait = tenacity.wait_random_exponential(multiplier=delay, max=max_delay)
    return tenacity.retry(
        reraise=True,
        retry_error_cls=RetryAttemptsExceeded,
        retry=tenacity.retry_if_result(
            lambda result: isinstance(result, Exception)),
        wait=wait,
        stop=tenacity.stop_after_attempt(max_attempts),
        before=_log_before_retry,
        after=_log_after_retry,
    )
def list_match(list1, list2):
    """Order-insensitively compare two lists.

    Elements are matched after sorting both lists; dict elements match when
    list1's dict is a subset of list2's counterpart, and nested lists are
    compared recursively with the same relaxation.
    """
    if not (isinstance(list1, list) and isinstance(list2, list)):
        return False
    if len(list1) != len(list2):
        return False
    for v1, v2 in zip(sorted(list1), sorted(list2)):
        if isinstance(v1, dict):
            if not dict_match(v1, v2):
                return False
        elif isinstance(v1, list):
            if not list_match(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True
def dict_match(dict1, dict2):
    """Return True when *dict1* is a (recursive) subset of *dict2*.

    Nested dicts are compared with the same subset semantics; list values
    are compared via list_match.
    """
    if not (isinstance(dict1, dict) and isinstance(dict2, dict)):
        return False
    for key, v1 in dict1.items():
        if key not in dict2:
            return False
        v2 = dict2[key]
        if isinstance(v1, dict):
            if not dict_match(v1, v2):
                return False
        elif isinstance(v1, list):
            if not list_match(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True
def get_name_short_uuid(uuid):
    """Collapse *uuid* to a short suffix: '_' + first five + '...' + last five."""
    return '_{}...{}'.format(uuid[:5], uuid[-5:])
def get_name_and_uuid(name, uuid, tag=None, maxlen=80):
    """Compose '<name>[_<tag>]<short-uuid>' truncated to *maxlen* characters.

    The name is clipped so the whole composite (including the optional tag
    and the shortened uuid suffix) never exceeds maxlen.
    """
    suffix = get_name_short_uuid(uuid)
    budget = maxlen - len(suffix)
    if tag:
        budget -= len(tag) + 1  # account for the '_' separator as well
        return '{}_{}{}'.format(name[:budget], tag, suffix)
    return name[:budget] + suffix
def build_extra_args(body, extra_args, **kwargs):
    """Copy each name in *extra_args* from kwargs into *body* when supplied.

    Returns the (mutated) body for chaining.
    """
    body.update({arg: kwargs[arg] for arg in extra_args if arg in kwargs})
    return body
def escape_tag_data(data):
    """Backslash-escape characters reserved by ElasticSearch query_string.

    Only '/', '-' and ':' are handled; other reserved characters are assumed
    not to occur in tag scopes or values.
    """
    return data.translate(str.maketrans({'/': r'\/', '-': r'\-', ':': r'\:'}))
def escape_display_name(display_name):
    """Replace characters illegal in display names (;|=,~@) with '.'."""
    return re.sub(r'[;|=,~@]', '.', display_name)
class NsxLibCache(object):
    """Tiny in-memory cache whose entries expire after *timeout* seconds."""

    def __init__(self, timeout):
        super(NsxLibCache, self).__init__()
        self.timeout = timeout
        self._cache = {}

    def expired(self, entry):
        """Return True once *entry* is older than the configured timeout."""
        age = time.time() - entry['time']
        return age > self.timeout

    def get(self, key):
        """Return the cached value for *key*, or None when absent or expired.

        Expired entries are evicted as a side effect of the lookup.
        """
        entry = self._cache.get(key)
        if entry is None:
            return None
        if self.expired(entry):
            self.remove(key)
            return None
        return entry['value']

    def update(self, key, value):
        """Store *value* under *key*, timestamped now."""
        self._cache[key] = {'time': time.time(), 'value': value}

    def remove(self, key):
        """Drop *key* from the cache if present."""
        self._cache.pop(key, None)
class NsxLibApiBase(object):
"""Base class for nsxlib api """
def __init__(self, client, nsxlib_config=None, nsxlib=None):
    """Wire up the REST client, library config and owning NsxLib instance."""
    super(NsxLibApiBase, self).__init__()
    self.client = client
    self.nsxlib_config = nsxlib_config
    self.nsxlib = nsxlib
    self.cache = NsxLibCache(self.cache_timeout)
    # fall back to a single attempt when the client exposes no retry budget
    self.max_attempts = getattr(client, 'max_attempts', 1)
@abc.abstractproperty
def uri_segment(self):
    """Base API URI segment for this resource type (subclass-provided)."""
    pass
@abc.abstractproperty
def resource_type(self):
    """Backend resource type string for this API (subclass-provided)."""
    pass
@property
def use_cache_for_get(self):
    """Whether GET results are cached; off by default, subclasses may opt in."""
    return False
@property
def cache_timeout(self):
    """Maximum cache entry age in seconds (defaults to DEFAULT_CACHE_AGE_SEC)."""
    return DEFAULT_CACHE_AGE_SEC
def get_path(self, resource=None):
    """Join the base URI segment with an optional resource identifier."""
    if not resource:
        return self.uri_segment
    return f'{self.uri_segment}/{resource}'
def list(self):
    """Return all resources of this type (GET on the collection URI)."""
    return self.client.list(self.uri_segment)
def get(self, uuid, silent=False):
    """Fetch resource *uuid*, consulting the local cache when enabled.

    Fresh backend results are written back into the cache; *silent*
    suppresses debug logging of cache hits (and is forwarded to the client).
    """
    if self.use_cache_for_get:
        cached = self.cache.get(uuid)
        if cached:
            if not silent:
                LOG.debug("Getting %s from cache.", self.get_path(uuid))
            return cached
    result = self.client.get(self.get_path(uuid), silent=silent)
    if result and self.use_cache_for_get:
        self.cache.update(uuid, result)
    return result
def read(self, uuid, silent=False):
    """Alias of get()."""
    return self.get(uuid, silent=silent)
def delete(self, uuid):
    """Delete resource *uuid*, evicting any cached copy first."""
    if self.use_cache_for_get:
        self.cache.remove(uuid)
    return self._delete_with_retry(uuid)
def find_by_display_name(self, display_name):
    """Return all resources whose display_name equals *display_name*."""
    return [resource for resource in self.list()['results']
            if resource['display_name'] == display_name]
def _update_with_retry(self, uuid, payload):
    """Update resource *uuid* with retries, evicting any cached copy first."""
    if self.use_cache_for_get:
        self.cache.remove(uuid)
    return self._update_resource(self.get_path(uuid), payload, retry=True)
def _internal_update_resource(self, resource, payload, headers=None,
                              create_action=False,
                              get_params=None,
                              action_params=None,
                              update_payload_cbk=None):
    """Read the current resource body, merge *payload* into it, write it back.

    :param resource: base URI of the resource.
    :param payload: partial body to merge on top of the freshly-read state.
        May contain a 'tags_update' key, which is merged tag-wise via
        update_v3_tags and removed from *payload* (caller's dict is mutated).
    :param headers: extra HTTP headers for the write call.
    :param create_action: when True issue client.create instead of
        client.update against the action path.
    :param get_params: query string appended only to the read URI.
    :param action_params: query string appended only to the write URI.
    :param update_payload_cbk: optional hook(revised_payload, payload) run
        before merging, allowing callers to adjust the freshly-read body.
    """
    get_path = action_path = resource
    if get_params:
        get_path = get_path + get_params
    if action_params:
        action_path = action_path + action_params
    # read the current state so unspecified fields are preserved
    revised_payload = self.client.get(get_path)
    # custom resource callback for updating the payload
    if update_payload_cbk:
        update_payload_cbk(revised_payload, payload)
    # special treatment for tags (merge old and new)
    if 'tags_update' in payload.keys():
        revised_payload['tags'] = update_v3_tags(
            revised_payload.get('tags', []),
            payload['tags_update'])
        del payload['tags_update']
    # update all the rest of the parameters
    for key_name in payload.keys():
        # handle 2 levels of dictionary: merge nested dicts key-by-key
        # instead of replacing the whole sub-dict
        if isinstance(payload[key_name], dict):
            if key_name not in revised_payload:
                revised_payload[key_name] = payload[key_name]
            else:
                # copy each key
                revised_payload[key_name].update(payload[key_name])
        else:
            revised_payload[key_name] = payload[key_name]
    if create_action:
        return self.client.create(action_path, revised_payload,
                                  headers=headers)
    else:
        return self.client.update(action_path, revised_payload,
                                  headers=headers)
def _update_resource(self, resource, payload, headers=None,
create_action=False, get_params=None,
action_params=None, update_payload_cbk=None,
retry=False):
if retry:
# If revision_id of | |
import functools
from numbers import Integral
from itertools import product, starmap, cycle
from collections import defaultdict
from autoray import do
from ..utils import check_opt, ensure_dict
from ..gen.rand import randn, seed_rand
from . import array_ops as ops
from .tensor_core import (
Tensor,
TensorNetwork,
oset,
tags_to_oset,
rand_uuid,
)
def gen_3d_bonds(Lx, Ly, Lz, steppers, coo_filter=None):
    """Tile pairs of bond coordinates on a 3D lattice.

    Parameters
    ----------
    Lx, Ly, Lz : int
        Lattice extent in each direction.
    steppers : callable or sequence of callable
        Function(s) mapping a coordinate ``(i, j, k)`` to a partner
        coordinate, thereby defining a bond.
    coo_filter : callable, optional
        Predicate on ``(i, j, k)``; only coordinates for which it returns
        True are used as bond starting points.

    Yields
    ------
    bond : tuple[tuple[int, int, int], tuple[int, int, int]]
        A pair of coordinates; partners falling outside the lattice are
        skipped.
    """
    if callable(steppers):
        steppers = (steppers,)

    def _in_lattice(coo):
        i, j, k = coo
        return (0 <= i < Lx) and (0 <= j < Ly) and (0 <= k < Lz)

    for coo in product(range(Lx), range(Ly), range(Lz)):
        if coo_filter is not None and not coo_filter(*coo):
            continue
        for stepper in steppers:
            coo2 = stepper(*coo)
            if _in_lattice(coo2):
                yield coo, coo2
class Rotator3D:
    """Object for rotating coordinates and various contraction functions so
    that the core algorithms only have to be written once, without the actual
    TN having to be modified.

    Parameters
    ----------
    tn : TensorNetwork3D
        The tensor network whose coordinates are to be (logically) rotated.
    xrange, yrange, zrange : (int, int) or None
        Inclusive coordinate ranges; ``None`` means the full extent of ``tn``.
    from_which : {'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'}
        Which face of the lattice sweeps should start from.
    """

    def __init__(self, tn, xrange, yrange, zrange, from_which):
        check_opt('from_which', from_which,
                  {'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'})

        # default each range to the full lattice extent
        if xrange is None:
            xrange = (0, tn.Lx - 1)
        if yrange is None:
            yrange = (0, tn.Ly - 1)
        if zrange is None:
            zrange = (0, tn.Lz - 1)

        self.xrange = xrange
        self.yrange = yrange
        self.zrange = zrange
        self.from_which = from_which
        # 'x', 'y' or 'z' -> the axis the sweep advances along
        self.plane = from_which[0]

        # map rotated (i, j, k) ranges/tag-functions onto the real axes so
        # that sweeping code can always treat 'i' as the sweep direction
        if self.plane == 'x':
            # -> no rotation needed
            self.imin, self.imax = sorted(xrange)
            self.jmin, self.jmax = sorted(yrange)
            self.kmin, self.kmax = sorted(zrange)
            self.x_tag = tn.x_tag
            self.y_tag = tn.y_tag
            self.z_tag = tn.z_tag
            self.site_tag = tn.site_tag
        elif self.plane == 'y':
            # -> (y, z, x)
            self.imin, self.imax = sorted(yrange)
            self.jmin, self.jmax = sorted(zrange)
            self.kmin, self.kmax = sorted(xrange)
            self.x_tag = tn.y_tag
            self.y_tag = tn.z_tag
            self.z_tag = tn.x_tag
            self.site_tag = lambda i, j, k: tn.site_tag(k, i, j)
        else:  # self.plane == 'z'
            # -> (z, x, y)
            self.imin, self.imax = sorted(zrange)
            self.jmin, self.jmax = sorted(xrange)
            self.kmin, self.kmax = sorted(yrange)
            self.x_tag = tn.z_tag
            self.y_tag = tn.x_tag
            self.z_tag = tn.y_tag
            self.site_tag = lambda i, j, k: tn.site_tag(j, k, i)

        if 'min' in from_which:
            # -> sweeps are increasing
            self.sweep = range(self.imin, self.imax + 1, + 1)
            self.istep = +1
        else:  # 'max'
            # -> sweeps are decreasing
            self.sweep = range(self.imax, self.imin - 1, -1)
            self.istep = -1
# reference for viewing a cube from each direction
#
# ┌──┐ ┌──┐ ┌──┐ ┌──┐ ┌──┐ ┌──┐
# │y+│ │z+│ │x-│ │y-│ │z-│ │x+│
# ┌──┼──┼──┐ ┌──┼──┼──┐ ┌──┼──┼──┐ ┌──┼──┼──┐ ┌──┼──┼──┐ ┌──┼──┼──┐
# │z-│x-│z+│ │x-│y-│x+│ │y+│z-│y-│ │z-│x+│z+│ │x-│y+│x+│ │y+│z+│y-│
# └──┼──┼──┘, └──┼──┼──┘, └──┼──┼──┘, └──┼──┼──┘, └──┼──┼──┘, └──┼──┼──┘
# │y-│ │z-│ │x+│ │y+│ │z+│ │x-│
# └──┘ └──┘ └──┘ └──┘ └──┘ └──┘
# Per-face option tables keyed by the ``from_which`` face specifier.
# Each entry fixes the direction each axis is traversed and the order in
# which coordinates/steps are taken when *canonizing* a plane of a 3D TN.
_canonize_plane_opts = {
    'xmin': {
        'yreverse': False,
        'zreverse': False,
        'coordinate_order': 'yz',
        'stepping_order': 'zy',
    },
    'ymin': {
        'zreverse': False,
        'xreverse': True,
        'coordinate_order': 'zx',
        'stepping_order': 'xz',
    },
    'zmin': {
        'xreverse': True,
        'yreverse': True,
        'coordinate_order': 'xy',
        'stepping_order': 'yx',
    },
    'xmax': {
        'yreverse': True,
        'zreverse': True,
        'coordinate_order': 'yz',
        'stepping_order': 'zy',
    },
    'ymax': {
        'zreverse': True,
        'xreverse': False,
        'coordinate_order': 'zx',
        'stepping_order': 'xz',
    },
    'zmax': {
        'xreverse': False,
        'yreverse': False,
        'coordinate_order': 'xy',
        'stepping_order': 'yx',
    },
}

# Same structure as ``_canonize_plane_opts`` but for *compressing* a plane -
# each face's traversal directions are the reverse of the canonize table.
_compress_plane_opts = {
    'xmin': {
        'yreverse': True,
        'zreverse': True,
        'coordinate_order': 'yz',
        'stepping_order': 'zy',
    },
    'ymin': {
        'zreverse': True,
        'xreverse': False,
        'coordinate_order': 'zx',
        'stepping_order': 'xz',
    },
    'zmin': {
        'xreverse': False,
        'yreverse': False,
        'coordinate_order': 'xy',
        'stepping_order': 'yx',
    },
    'xmax': {
        'yreverse': False,
        'zreverse': False,
        'coordinate_order': 'yz',
        'stepping_order': 'zy',
    },
    'ymax': {
        'zreverse': False,
        'xreverse': True,
        'coordinate_order': 'zx',
        'stepping_order': 'xz',
    },
    'zmax': {
        'xreverse': True,
        'yreverse': True,
        'coordinate_order': 'xy',
        'stepping_order': 'yx',
    },
}
class TensorNetwork3D(TensorNetwork):
    """Mixin for tensor networks laid out on a 3D cubic lattice."""

    # number of lattice dimensions
    _NDIMS = 3
    # properties propagated when other TNs are viewed/combined as this class
    _EXTRA_PROPS = (
        '_site_tag_id',
        '_x_tag_id',
        '_y_tag_id',
        '_z_tag_id',
        '_Lx',
        '_Ly',
        '_Lz',
    )
def _compatible_3d(self, other):
"""Check whether ``self`` and ``other`` are compatible 3D tensor
networks such that they can remain a 3D tensor network when combined.
"""
return (
isinstance(other, TensorNetwork3D) and
all(getattr(self, e) == getattr(other, e)
for e in TensorNetwork3D._EXTRA_PROPS)
)
    def __and__(self, other):
        """Combine with ``other``, restoring the 3D view when the two
        networks have compatible lattice geometry.
        """
        new = super().__and__(other)
        if self._compatible_3d(other):
            new.view_as_(TensorNetwork3D, like=self)
        return new
    def __or__(self, other):
        """Combine (virtually) with ``other``, restoring the 3D view when
        the two networks have compatible lattice geometry.
        """
        new = super().__or__(other)
        if self._compatible_3d(other):
            new.view_as_(TensorNetwork3D, like=self)
        return new
    @property
    def Lx(self):
        """The number of x-slices (extent of the first lattice dimension).
        """
        return self._Lx
    @property
    def Ly(self):
        """The number of y-slices (extent of the second lattice dimension).
        """
        return self._Ly
    @property
    def Lz(self):
        """The number of z-slices (extent of the third lattice dimension).
        """
        return self._Lz
    @property
    def nsites(self):
        """The total number of sites, ``Lx * Ly * Lz``.
        """
        return self._Lx * self._Ly * self._Lz
    @property
    def site_tag_id(self):
        """The string specifier (format template) for tagging each site of
        this 3D TN.
        """
        return self._site_tag_id
def site_tag(self, i, j, k):
"""The name of the tag specifiying the tensor at site ``(i, j, k)``.
"""
if not isinstance(i, str):
i = i % self.Lx
if not isinstance(j, str):
j = j % self.Ly
if not isinstance(k, str):
k = k % self.Lz
return self.site_tag_id.format(i, j, k)
    @property
    def x_tag_id(self):
        """The string specifier (format template) for tagging each x-slice
        of this 3D TN.
        """
        return self._x_tag_id
    def x_tag(self, i):
        """The tag of the ``i``-th x-slice, with integer ``i`` wrapped
        periodically into range.
        """
        if not isinstance(i, str):
            i = i % self.Lx
        return self.x_tag_id.format(i)
@property
def x_tags(self):
"""A tuple of all of the ``Lx`` different x-slice tags.
"""
return tuple(map(self.x_tag, range(self.Lx)))
    @property
    def y_tag_id(self):
        """The string specifier (format template) for tagging each y-slice
        of this 3D TN.
        """
        return self._y_tag_id
    def y_tag(self, j):
        """The tag of the ``j``-th y-slice, with integer ``j`` wrapped
        periodically into range.
        """
        if not isinstance(j, str):
            j = j % self.Ly
        return self.y_tag_id.format(j)
@property
def y_tags(self):
"""A tuple of all of the ``Ly`` different y-slice tags.
"""
return tuple(map(self.y_tag, range(self.Ly)))
    @property
    def z_tag_id(self):
        """The string specifier (format template) for tagging each z-slice
        of this 3D TN.
        """
        return self._z_tag_id
    def z_tag(self, k):
        """The tag of the ``k``-th z-slice, with integer ``k`` wrapped
        periodically into range.
        """
        if not isinstance(k, str):
            k = k % self.Lz
        return self.z_tag_id.format(k)
@property
def z_tags(self):
"""A tuple of all of the ``Lz`` different z-slice tags.
"""
return tuple(map(self.z_tag, range(self.Lz)))
@property
def site_tags(self):
"""All of the ``Lx * Ly`` site tags.
"""
return tuple(starmap(self.site_tag, self.gen_site_coos()))
def maybe_convert_coo(self, coo):
"""Check if ``coo`` is a tuple of three ints and convert to the
corresponding site tag if so.
"""
if not isinstance(coo, str):
try:
i, j, k = map(int, coo)
return self.site_tag(i, j, k)
except (ValueError, TypeError):
pass
return coo
    def _get_tids_from_tags(self, tags, which='all'):
        """This is the function that lets coordinates such as ``(i, j, k)`` be
        used for many 'tag' based functions.
        """
        # translate a raw coordinate tuple into its site tag first
        tags = self.maybe_convert_coo(tags)
        return super()._get_tids_from_tags(tags, which=which)
    def gen_site_coos(self):
        """Generate coordinates for all the sites in this 3D TN, in
        row-major ``(i, j, k)`` order.
        """
        return product(range(self.Lx), range(self.Ly), range(self.Lz))
    def gen_bond_coos(self):
        """Generate pairs of coordinates for all the bonds in this 3D TN
        (nearest neighbors along the +x, +y and +z directions).
        """
        return gen_3d_bonds(self.Lx, self.Ly, self.Lz, steppers=[
            lambda i, j, k: (i + 1, j, k),
            lambda i, j, k: (i, j + 1, k),
            lambda i, j, k: (i, j, k + 1),
        ])
def valid_coo(self, coo, xrange=None, yrange=None, zrange=None):
"""Check whether ``coo`` is in-bounds.
Parameters
----------
coo : (int, int, int), optional
The coordinates to check.
xrange, yrange, zrange : (int, int), optional
The range of allowed values for the x, y, | |
return LiteralInput(**io_info)
elif io_select == WPS_OUTPUT:
io_info.pop("min_occurs", None)
io_info.pop("max_occurs", None)
io_info.pop("allowed_values", None)
io_info.pop("data_format", None)
io_info.pop("default", None)
if io_type in WPS_COMPLEX_TYPES:
io_info.pop("supported_values", None)
return ComplexOutput(**io_info)
if io_type == WPS_BOUNDINGBOX:
io_info.pop("supported_formats", None)
return BoundingBoxOutput(**io_info)
if io_type == WPS_LITERAL:
io_info.pop("supported_formats", None)
io_info["data_type"] = json2wps_datatype(io_info)
allowed_values = json2wps_allowed_values(io_info)
if allowed_values:
io_info["allowed_values"] = allowed_values
else:
io_info.pop("allowed_values", None)
io_info.pop("literalDataDomains", None)
return LiteralOutput(**io_info)
raise PackageTypeError("Unknown conversion from dict to WPS type (type={0}, mode={1}).".format(io_type, io_select))
def wps2json_io(io_wps):
    # type: (WPS_IO_Type) -> JSON_IO_Type
    """
    Converts a PyWPS I/O into a dictionary based version with keys corresponding to standard names (WPS 2.0).

    :param io_wps: PyWPS I/O instance; must derive from :class:`BasicIO` and expose a ``json`` property.
    :returns: JSON representation of the I/O with WPS-2.0 field names and normalized formats/domains.
    :raises PackageTypeError: if the object is not a :class:`BasicIO` or lacks the ``json`` property.
    """
    if not isinstance(io_wps, BasicIO):
        raise PackageTypeError("Invalid type, expected 'BasicIO', got: [{0!r}] '{1!r}'".format(type(io_wps), io_wps))
    if not hasattr(io_wps, "json"):
        raise PackageTypeError("Invalid type definition expected to have a 'json' property.")
    io_wps_json = io_wps.json  # noqa
    # mapping of PyWPS attribute names -> WPS 2.0 JSON field names
    rename = {
        "identifier": "id",
        "abstract": "description",
        "supported_formats": "formats",
        "mime_type": "mediaType",
        "min_occurs": "minOccurs",
        "max_occurs": "maxOccurs",
    }
    replace_values = {
        PACKAGE_ARRAY_MAX_SIZE: "unbounded",
    }
    # occurs fields are represented as strings in the JSON schema
    replace_func = {
        "maxOccurs": str,
        "minOccurs": str,
    }
    transform_json(io_wps_json, rename=rename, replace_values=replace_values, replace_func=replace_func)
    # in some cases (Complex I/O), 'as_reference=True' causes "type" to be overwritten, revert it back
    if "type" in io_wps_json and io_wps_json["type"] == WPS_REFERENCE:
        io_wps_json["type"] = WPS_COMPLEX
    # minimum requirement of 1 format object which defines mime-type
    if io_wps_json["type"] == WPS_COMPLEX:
        # FIXME: should we store 'None' in db instead of empty string when missing "encoding", "schema", etc. ?
        if "formats" not in io_wps_json or not len(io_wps_json["formats"]):
            io_wps_json["formats"] = [DEFAULT_FORMAT.json]
        for io_format in io_wps_json["formats"]:
            transform_json(io_format, rename=rename, replace_values=replace_values, replace_func=replace_func)
        # set 'default' format if it matches perfectly, or if only mime-type matches and it is the only available one
        # (this avoid 'encoding' possibly not matching due to CWL not providing this information)
        io_default = get_field(io_wps_json, "default", search_variations=True)
        for io_format in io_wps_json["formats"]:
            io_format["default"] = (io_default != null and is_equal_formats(io_format, io_default))
        if io_default and len(io_wps_json["formats"]) == 1 and not io_wps_json["formats"][0]["default"]:
            io_default_mime_type = get_field(io_default, "mime_type", search_variations=True)
            io_single_fmt_mime_type = get_field(io_wps_json["formats"][0], "mime_type", search_variations=True)
            io_wps_json["formats"][0]["default"] = (io_default_mime_type == io_single_fmt_mime_type)
    elif io_wps_json["type"] == WPS_BOUNDINGBOX:
        pass  # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
    else:  # literal
        domains = any2json_literal_data_domains(io_wps_json)
        if domains:
            io_wps_json["literalDataDomains"] = domains
    return io_wps_json
def wps2json_job_payload(wps_request, wps_process):
    # type: (WPSRequest, ProcessWPS) -> JSON
    """
    Converts the input and output values of a :mod:`pywps` WPS ``Execute`` request to a corresponding WPS-REST job.

    The inputs and outputs must be parsed from XML POST payload or KVP GET query parameters, and converted to the
    data container defined by :mod:`pywps` based on the process definition.
    """
    payload = {
        "inputs": [],
        "outputs": [],
        "response": EXECUTE_RESPONSE_DOCUMENT,
        "mode": EXECUTE_MODE_ASYNC,
    }
    # flatten possibly multi-valued inputs into individual id/data or id/href entries
    for input_list in wps_request.inputs.values():
        input_id = get_any_id(input_list[0])
        for input_value in input_list:
            if input_value.get("data"):
                payload["inputs"].append({"id": input_id, "data": input_value.get("data")})
            elif input_value.get("href"):
                payload["inputs"].append({"id": input_id, "href": input_value.get("href")})
    # every process output gets an entry, whether explicitly requested or not
    requested_outputs = list(wps_request.outputs)
    for output in wps_process.outputs:
        output_id = output.identifier
        as_reference = isinstance(output, ComplexOutput)
        if output_id in requested_outputs:
            output_data = wps_request.outputs[output_id]
        else:
            output_data = {"identifier": output_id, "asReference": str(as_reference).lower()}
        # complex outputs are returned by reference, others by value
        output_data["transmissionMode"] = (
            EXECUTE_TRANSMISSION_MODE_REFERENCE if as_reference else EXECUTE_TRANSMISSION_MODE_VALUE
        )
        output_data["id"] = output_id
        payload["outputs"].append(output_data)
    return payload
def get_field(io_object, field, search_variations=False, only_variations=False, pop_found=False, default=null):
    # type: (Any, str, bool, bool, bool, Any) -> Any
    """
    Gets a field by name from various I/O object types.

    The default is :py:data:`null` (rather than ``None``, often a legitimate field value) so callers can tell
    'no field' apart from 'field set to no value'. If another :paramref:`default` is given, it is returned when
    no match is found.

    When :paramref:`search_variations` is enabled and the direct lookup fails, the names listed under
    :paramref:`field` in :data:`WPS_FIELD_MAPPING` are tried in order until one matches, finally falling back
    to :paramref:`default`.

    :param io_object: Any I/O representation, either as a class instance or JSON container.
    :param field: Name of the field to look for, either as property or key name based on input object type.
    :param search_variations: If enabled, search for all variations to the field name to attempt search until matched.
    :param only_variations: If enabled, skip the first 'basic' field and start search directly with field variations.
    :param pop_found: If enabled, whenever a match is found by field or variations, remove that entry from the object.
    :param default: Alternative default value to return if no match could be found.
    :returns: Matched value (including search variations if enabled), or ``default``.
    """
    if not (search_variations and only_variations):
        is_mapping = isinstance(io_object, dict)
        value = io_object.get(field, null) if is_mapping else getattr(io_object, field, null)
        if value is not null:
            if is_mapping and pop_found:
                io_object.pop(field)
            return value
    if search_variations and field in WPS_FIELD_MAPPING:
        for variation in WPS_FIELD_MAPPING[field]:
            value = get_field(io_object, variation, pop_found=pop_found)
            if value is not null:
                return value
    return default
def set_field(io_object, field, value, force=False):
    # type: (Union[ANY_IO_Type, ANY_Format_Type], str, Any, bool) -> None
    """
    Sets a field by name into various I/O object types.

    A ``null`` value is considered `invalid` and is not written unless ``force=True``.
    """
    if value is null and not force:
        return
    if isinstance(io_object, dict):
        io_object[field] = value
    else:
        setattr(io_object, field, value)
def _are_different_and_set(item1, item2):
    # type: (Any, Any) -> bool
    """
    Verifies that two items are both set and differ in "representative" value.

    Returns ``True`` only when both items are not ``null`` and have different representative values.
    By "representative" we mean the visual representation of byte/unicode strings rather than the literal
    objects, to support XML/JSON and Python 2/3 implementations. Non string-like types are compared with
    the usual equality method.
    """
    if item1 is null or item2 is null:
        return False
    try:
        # a custom ``__eq__`` may access attributes missing on the other operand
        # (eg: ``Format`` checking ``item.mime_type``), raising ``AttributeError`` -
        # treat that as 'not comparable', hence not 'different'
        if item1 == item2:
            return False
    except AttributeError:
        return False
    # Note: check for both (str, bytes) for any python implementation that modifies its value
    str_like1 = isinstance(item1, (str, bytes))
    str_like2 = isinstance(item2, (str, bytes))
    if str_like1 and str_like2:
        return bytes2str(item1) != bytes2str(item2)
    return True
def is_equal_formats(format1, format2):
    # type: (Union[Format, JSON], Union[Format, JSON]) -> bool
    """
    Verifies that two formats match on both mime-type and encoding, with all four values defined.
    """
    mime_type1, encoding1, mime_type2, encoding2 = (
        get_field(fmt, key, search_variations=True)
        for fmt in (format1, format2)
        for key in ("mime_type", "encoding")
    )
    return (
        null not in (mime_type1, mime_type2, encoding1, encoding2)
        and mime_type1 == mime_type2
        and encoding1 == encoding2
    )
def normalize_ordered_io(io_section, order_hints=None):
# type: (JSON_IO_ListOrMap, Optional[JSON_IO_ListOrMap]) -> List[JSON]
"""
Reorders and converts I/O from any representation (:class:`dict` or :class:`list`) considering given ordering hints.
First, converts I/O definitions defined as dictionary to an equivalent :class:`list` representation,
in order to work only with a single representation method. The :class:`list` is chosen over :class:`dict` because
sequences can enforce a specific order, while mapping have no particular order. The list representation ensures
that I/O order is preserved when written to file and reloaded afterwards regardless of each server and/or library's
implementation of the mapping container.
If this function fails to correctly order any I/O or cannot correctly guarantee such result because of the provided
parameters (e.g.: no hints given when required), the result will not break nor change the final processing behaviour
of parsers. This is merely *cosmetic* adjustments to ease readability of I/O to avoid always shuffling their order
across multiple :term:`Application Package` and :term:`Process` reporting formats.
The important result | |
#!/usr/bin/python
# coding: utf-8
import ctypes
import math
import sdl2 as SDL
# Heavily modified JsMB-SDL2 Alpha 11 58884391d121876a2269f10202c65a9761b25e78
class JsMB():
    """Python/SDL2 port of the JsMobileBasic runtime.

    Drawing state, input state and engine metadata are kept in class-level
    containers (shared across instances, mirroring the original JS object).
    """

    # current mouse position and click counters
    Mouse = {
        'x': 0,
        'y': 0,
        'lcount': 0,
        'rcount': 0
    }
    # loaded graphical elements (sprites)
    Gel = { 'Sprite': {} }
    # current font settings
    Font = {
        'family': 'arial',
        'size': '10'
    }
    # current drawing state (colour object, background colour, line width)
    Draw = {
        'color': None,
        'BGCOLOR': [255, 255, 255, 255],
        'linewidth': 1
    }
    JsMobileBasic = {
        'name': 'JsMobileBasic',
        'version': 'Alpha 11',
        'author': 'PROPHESSOR',
        'url': 'http://vk.com/JsMobileBasic',
        'Mobile': None,  # Config.Mobile,
        'Debug': True,
        'canvas': None,  # typeof document === 'undefined' ? null : document.getElementById('c'),
        'graphics': True,
        'supports': {
            'document': False,
            'window': False,
            'browser': False,
            'ls': False,
            'module': False
        }
    }
    Instance = {
        'name': 'JsMobileBasic'
    }
    # BUGFIX: was ``PI = math.pi,`` - the trailing comma made PI a 1-tuple
    # instead of a float, breaking any arithmetic with it.
    PI = math.pi
    G = 9.8  # TODO: DEPRECATE
    RAD2DEG = 180 / math.pi
    DEG2RAD = math.pi / 180
    def __init__(self, config=None, canvas=None, renderer=None, main=None, loop=None):
        """Initialise the SDL2 window and renderer, call ``main`` once, then
        run the event loop (calling ``loop`` every frame) until SDL_QUIT.

        :param config: optional dict; 'name' and 'Debug' keys override defaults
        :param canvas: existing SDL window to reuse (created when None)
        :param renderer: existing SDL renderer to reuse (created when None)
        :param main: callable invoked once with this instance before the loop
        :param loop: callable invoked with this instance on every frame
        """
        # TODO:
        self.SCW = 640
        self.SCH = 480
        self.debug('#===== Включён режим отладки =====#', 'color:gray;')
        self.debug(self.JsMobileBasic['name'], 'background:gray;color:yellow;')
        self.debug('v. ' + self.JsMobileBasic['version'], 'background:gray;color:yellow;')
        self.debug('by ' + self.JsMobileBasic['author'], 'background:gray;color:yellow;')
        self.debug(self.JsMobileBasic['url'], 'background:gray;color:yellow;')
        self.debug('// ======Инициализация рабочей среды======//', 'color:gray;')
        # apply user configuration overrides
        if config:
            if 'name' in config:
                self.Instance['name'] = config['name']
            if 'Debug' in config:
                self.JsMobileBasic['Debug'] = config['Debug']
        self.debug('Используется графика!', 'background:black;color:yellow;')
        # TODO: Read screen size from config
        self.JsMobileBasic['canvas'] = self.c = canvas if canvas else SDL.SDL_CreateWindow(bytes(self.Instance['name'], 'utf-8'), SDL.SDL_WINDOWPOS_UNDEFINED, SDL.SDL_WINDOWPOS_UNDEFINED, self.SCW, self.SCH, SDL.SDL_WINDOW_SHOWN)
        # TODO: SDL_WINDOW_FULLSCREEN
        self.ctx = renderer if renderer else SDL.SDL_CreateRenderer(self.c, -1, SDL.SDL_RENDERER_ACCELERATED | SDL.SDL_RENDERER_PRESENTVSYNC)
        # start from a known drawing state: thin red strokes on white
        self.setLineWidth(1)
        self.setColor([255, 0, 0, 0])
        self.fillScreen(255, 255, 255, 255)
        self.repaint()
        self.debug('Имя проекта: ' + self.Instance['name'], 'background:brown;color:yellow;')
        self.Player = [None]
        self.debug('// ======Инициализация интерпретатора======//', 'color:gray;')
        self._main = main
        self._loop = loop
        if self._main:
            self._main(self)
        # blocking SDL event loop - returns (and tears down) on SDL_QUIT
        event = SDL.SDL_Event()
        running = True
        while running:
            while SDL.SDL_PollEvent(ctypes.byref(event)) != 0:
                if event.type == SDL.SDL_QUIT:
                    running = False
                    break
            if self._loop:
                self._loop(self)  # TODO: Send keycode
        SDL.SDL_DestroyRenderer(self.ctx)
        SDL.SDL_DestroyWindow(self.c)
        SDL.SDL_Quit()
@classmethod
def Debug(*args):
print(*args)
    def debug(self, *args):
        """Print ``args`` only when the engine's Debug flag is enabled."""
        if self.JsMobileBasic['Debug']:
            print(*args)
    def setColor(self, *color):
        """Set the current drawing colour.

        :param color: colour in RGB(A) format, as an array or as
            separate components
        :returns: self (chainable)
        """
        if len(color) == 1: color = color[0]
        # NOTE(review): ``Color`` is not imported in this file - presumably
        # provided by the embedding module; confirm where it is defined.
        cl = Color(color)
        self.Draw['color'] = cl
        SDL.SDL_SetRenderDrawColor(self.ctx, *cl.getRgbaArray())
        return self
    def setLineWidth(self, width):
        """Set the current line width.

        :param width: line width in pixels
        :returns: self (chainable)
        """
        self.Draw['linewidth'] = width
        return self
    def delay(self, ms):
        """Pause for ``ms`` milliseconds (via SDL_Delay).

        :returns: self (chainable)
        """
        SDL.SDL_Delay(ms)
        return self
    def cls(self):
        """Clear the screen to white, preserving the currently selected
        draw colour.

        :returns: self (chainable)
        """
        tmp = self.Draw['color']
        self.setColor(255, 255, 255, 255)
        SDL.SDL_RenderClear(self.ctx)
        self.setColor(tmp)
        return self
    def repaint(self):
        """Present the back buffer, flushing previously issued drawing
        operations to the screen.

        :returns: self (chainable)
        """
        SDL.SDL_RenderPresent(self.ctx)
        return self
def drawPlot(self, x, y):
''' Рисует точку по координатам (заливает пиксель)
* @param {number} x - X координата точки
* @param {number} y - Y координата точки
* @returns {self}
'''
pass # TODO:
    def drawLine(self, x1, y1, x2, y2):
        """Draw a line between two points.

        :param x1: x of the first point
        :param y1: y of the first point
        :param x2: x of the second point
        :param y2: y of the second point
        :returns: self (chainable)
        """
        SDL.SDL_RenderDrawLine(self.ctx, int(x1), int(y1), int(x2), int(y2))
        return self
    def drawRect(self, x, y, w, h):
        """Draw a rectangle outline.

        :param x: x coordinate of the top-left corner
        :param y: y coordinate of the top-left corner
        :param w: width
        :param h: height
        :returns: self (chainable)
        """
        rect = SDL.SDL_Rect(int(x), int(y), int(w), int(h))
        SDL.SDL_RenderDrawRect(self.ctx, rect)
        return self
def fillRect(self, x, y, w, h):
''' Рисует залитый прямоугольник
* @param {number} x - Координата X левого верхнего угла
* @param {number} y - Координата Y левого верхнего угла
* @param {number} w - Ширина
* @param {number} h - Высота
* @returns {self}
'''
rect = SDL.SDL_Rect(int(x), int(y), int(w), int(h))
SDL.SDL_RenderDrawRect(self.ctx, rect)
SDL.SDL_RenderFillRect(self.ctx, rect)
return self
def fillScreen(self, *color):
''' Заливает экран выбранным цветом
* @param {string} color - Цвет в CSS формате
* @returns {self}
'''
cl = self.Draw['color']
self.setColor(color)
self.fillRect(0, 0, self.screenWidth(), self.screenHeight())
self.setColor(cl)
# Getters
    def screenWidth(self):
        """Return the screen width in pixels.

        :returns: number
        """
        return self.SCW
    def screenHeight(self):
        """Return the screen height in pixels.

        :returns: number
        """
        return self.SCH
# ''' Переключить полноэкранный режим
# * @param {bool} mode - True - включить, False - отключить
# * @returns {self}
# '''
# fullScreen(mode) {
# if (self.JsMobileBasic.supports.document) {
# if (mode) {
# if (document.documentElement.requestFullscreen)
# document.documentElement.requestFullScreen()
# else if (document.documentElement.webkitRequestFullScreen)
# document.documentElement.webkitRequestFullScreen()
# } else {
# if (document.cancelFullScreen)
# document.cancelFullScreen()
# else if (document.webkitCancelFullScreen)
# document.webkitCancelFullScreen()
# }
# return self
# }
# self.debug('Работа в полноэкранном режиме невозможна!')
# return False
# },
# ''' Заливает экран выбранным цветом
# * @param {string} color - Цвет в CSS формате
# * @returns {self}
# '''
# fillScreen(color) {
# const tmp = self.ctx.color
# self.setColor(color)
# self.fillRect(0, 0, self.screenWidth(), self.screenHeight())
# self.ctx.color = tmp
# return self
# },
# ''' Рисует прямоугольник
# * @param {number} x - Координата X левого верхнего угла
# * @param {number} y - Координата Y левого верхнего угла
# * @param {number} w - Ширина
# * @param {number} h - Высота
# * @returns {self}
# '''
# drawRect(x, y, w, h) {
# self.ctx.drawRect([x, y, x1, y1])
# return self
# },
# ''' Рисует точку по координатам (заливает пиксель)
# * @param {number} x - X координата точки
# * @param {number} y - Y координата точки
# * @returns {self}
# '''
# drawPlot(x, y) {
# self.ctx.drawPoint([[x, y]])
# return self
# },
# ''' Очищяет прямоугольную область
# * @param {number} x - Координата X левого верхнего угла
# * @param {number} y - Координата Y левого верхнего угла
# * @param {number} w - Ширина
# * @param {number} h - Высота
# * @returns {self}
# '''
# clearRect(x, y, w, h) {
# const tmp = self.ctx.color
# self.setColor(self.Draw.BGCOLOR)
# self.fillRect(x, y, w, h)
# self.ctx.color = tmp
# return self
# },
# ''' Рисует линию по 2 точкам
# * @param {number} x1 - X 1 точки
# * @param {number} y1 - Y 1 точки
# * @param {number} x2 - X 2 точки
# * @param {number} y2 - Y 2 точки
# * @returns {self}
# '''
# drawLine(x1, y1, x2, y2) {
# if(x1 === Infinity || x2 === Infinity || y1 === Infinity || y2 === Infinity) return self
# self.gfx.line(x1, y1, x2, y2, self.Draw.color.getNumber(), self.Draw._lineWidth)
# return self
# },
# ''' Рисует проекцию паралелепипеда (через 2 соединенных прямоугольника)
# * @param {number} x - X левого верхнего угла
# * @param {number} y - Y левого верхнего угла
# * @param {number} w - ширина
# * @param {number} h - высота
# * @param {number} q - глубина
# * @returns {self}
# '''
# drawCube(x, y, w, h, q) {
# self.ctx.strokeRect(x, y, w, h)
# self.ctx.strokeRect(x + (q / Math.sqrt(2)), y + (q / Math.sqrt(2)), w, h)
# self.drawLine(x, y, x + (q / Math.sqrt(2)), y + (q / Math.sqrt(2)))
# self.drawLine(x + w, y, x + w + (q / Math.sqrt(2)), y + (q / Math.sqrt(2)))
# self.drawLine(x, y + h, x + (q / Math.sqrt(2)), y + h + (q / Math.sqrt(2)))
# self.drawLine(x + w, y + h, x + w + (q / Math.sqrt(2)), y + h + (q / Math.sqrt(2)))
# return self
# },
# ''' Рисует залитую окружность
# * @param {number} x - X центра
# * @param {number} y - Y центра
# * @param {number} radius - радиус
# * @param {number} startAngle=(15*PI/7) - Угол начала
# * @param {number} endAngle=(13*PI/2) - Угол конца
# * @param {bool} counterClockwise=False - По часовой стрелке?
# * @returns {self}
# '''
# drawArc(x, y, radius,
# startAngle,// = (15 * Math.PI / 7),
# endAngle,// = (13 * Math.PI / 2),
# counterClockwise = False) {
# if (!startAngle) {
# self.gfx.ellipse(x, y, radius, radius, self.Draw.color.getNumber())
# } else {
# self.gfx.pie(x, y, radius, self.deg(startAngle), self.deg(endAngle), self.Draw.color.getNumber())
# }
# return self
# },
# ''' Рисует залитую окружность
# * @param {number} x - X центра
# * @param {number} y - Y центра
# * @param {number} radius - радиус
# * @param {number} startAngle=(15*PI/7) - Угол начала
# * @param {number} endAngle=(13*PI/2) - Угол конца
# * @param {bool} counterClockwise=False - По часовой стрелке?
# * @returns {self}
# '''
# fillArc(x, y, radius,
# startAngle, // = (15 * Math.PI / 7),
# endAngle = (13 * Math.PI / 2),
# counterClockwise = False) {
# if (!startAngle) | |
*(string) --*
The default value.
- **ParameterType** *(string) --*
The parameter type.
- **IsNoEcho** *(boolean) --*
If this value is true, the value for this parameter is obfuscated from view when the parameter is retrieved. This parameter is used to hide sensitive information.
- **Description** *(string) --*
The description of the parameter.
- **ParameterConstraints** *(dict) --*
Constraints that the administrator has put on a parameter.
- **AllowedValues** *(list) --*
The values that the administrator has allowed for the parameter.
- *(string) --*
- **ConstraintSummaries** *(list) --*
Information about the constraints used to provision the product.
- *(dict) --*
Summary information about a constraint.
- **Type** *(string) --*
The type of constraint.
* ``LAUNCH``
* ``NOTIFICATION``
* STACKSET
* ``TEMPLATE``
- **Description** *(string) --*
The description of the constraint.
- **UsageInstructions** *(list) --*
Any additional metadata specifically related to the provisioning of the product. For example, see the ``Version`` field of the CloudFormation template.
- *(dict) --*
Additional information provided by the administrator.
- **Type** *(string) --*
The usage instruction type for the value.
- **Value** *(string) --*
The usage instruction value for this type.
- **TagOptions** *(list) --*
Information about the TagOptions associated with the resource.
- *(dict) --*
Summary information about a TagOption.
- **Key** *(string) --*
The TagOption key.
- **Values** *(list) --*
The TagOption value.
- *(string) --*
- **ProvisioningArtifactPreferences** *(dict) --*
An object that contains information about preferences, such as regions and accounts, for the provisioning artifact.
- **StackSetAccounts** *(list) --*
One or more AWS accounts where stack instances are deployed from the stack set. These accounts can be scoped in ``ProvisioningPreferences$StackSetAccounts`` and ``UpdateProvisioningPreferences$StackSetAccounts`` .
Applicable only to a ``CFN_STACKSET`` provisioned product type.
- *(string) --*
- **StackSetRegions** *(list) --*
One or more AWS Regions where stack instances are deployed from the stack set. These regions can be scoped in ``ProvisioningPreferences$StackSetRegions`` and ``UpdateProvisioningPreferences$StackSetRegions`` .
Applicable only to a ``CFN_STACKSET`` provisioned product type.
- *(string) --*
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type ProductId: string
:param ProductId: **[REQUIRED]**
The product identifier.
:type ProvisioningArtifactId: string
:param ProvisioningArtifactId: **[REQUIRED]**
The identifier of the provisioning artifact.
:type PathId: string
:param PathId:
The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path. To list the paths for a product, use ListLaunchPaths .
:rtype: dict
:returns:
"""
pass
def describe_record(self, Id: str, AcceptLanguage: str = None, PageToken: str = None, PageSize: int = None) -> Dict:
"""
Gets information about the specified request operation.
Use this operation after calling a request operation (for example, ProvisionProduct , TerminateProvisionedProduct , or UpdateProvisionedProduct ).
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeRecord>`_
**Request Syntax**
::
response = client.describe_record(
AcceptLanguage='string',
Id='string',
PageToken='string',
PageSize=123
)
**Response Syntax**
::
{
'RecordDetail': {
'RecordId': 'string',
'ProvisionedProductName': 'string',
'Status': 'CREATED'|'IN_PROGRESS'|'IN_PROGRESS_IN_ERROR'|'SUCCEEDED'|'FAILED',
'CreatedTime': datetime(2015, 1, 1),
'UpdatedTime': datetime(2015, 1, 1),
'ProvisionedProductType': 'string',
'RecordType': 'string',
'ProvisionedProductId': 'string',
'ProductId': 'string',
'ProvisioningArtifactId': 'string',
'PathId': 'string',
'RecordErrors': [
{
'Code': 'string',
'Description': 'string'
},
],
'RecordTags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
'RecordOutputs': [
{
'OutputKey': 'string',
'OutputValue': 'string',
'Description': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **RecordDetail** *(dict) --*
Information about the product.
- **RecordId** *(string) --*
The identifier of the record.
- **ProvisionedProductName** *(string) --*
The user-friendly name of the provisioned product.
- **Status** *(string) --*
The status of the provisioned product.
* ``CREATED`` - The request was created but the operation has not started.
* ``IN_PROGRESS`` - The requested operation is in progress.
* ``IN_PROGRESS_IN_ERROR`` - The provisioned product is under change but the requested operation failed and some remediation is occurring. For example, a rollback.
* ``SUCCEEDED`` - The requested operation has successfully completed.
* ``FAILED`` - The requested operation has unsuccessfully completed. Investigate using the error messages returned.
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **UpdatedTime** *(datetime) --*
The time when the record was last updated.
- **ProvisionedProductType** *(string) --*
The type of provisioned product. The supported values are ``CFN_STACK`` and ``CFN_STACKSET`` .
- **RecordType** *(string) --*
The record type.
* ``PROVISION_PRODUCT``
* ``UPDATE_PROVISIONED_PRODUCT``
* ``TERMINATE_PROVISIONED_PRODUCT``
- **ProvisionedProductId** *(string) --*
The identifier of the provisioned product.
- **ProductId** *(string) --*
The product identifier.
- **ProvisioningArtifactId** *(string) --*
The identifier of the provisioning artifact.
- **PathId** *(string) --*
The path identifier.
- **RecordErrors** *(list) --*
The errors that occurred.
- *(dict) --*
The error code and description resulting from an operation.
- **Code** *(string) --*
The numeric value of the error.
- **Description** *(string) --*
The description of the error.
- **RecordTags** *(list) --*
One or more tags.
- *(dict) --*
Information about a tag, which is a key-value pair.
- **Key** *(string) --*
The key for this tag.
- **Value** *(string) --*
The value for this tag.
- **RecordOutputs** *(list) --*
Information about the product created as the result of a request. For example, the output for a CloudFormation-backed product that creates an S3 bucket would include the S3 bucket URL.
- *(dict) --*
The output for the product created as the result of a request. For example, the output for a CloudFormation-backed product that creates an S3 bucket would include the S3 bucket URL.
- **OutputKey** *(string) --*
The output key.
- **OutputValue** *(string) --*
The output value.
- **Description** *(string) --*
The description of the output.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type Id: string
:param Id: **[REQUIRED]**
The record identifier of the provisioned product. This identifier is returned by the request operation.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:rtype: dict
:returns:
"""
pass
def describe_service_action(self, Id: str, AcceptLanguage: str = None) -> Dict:
"""
Describes a self-service action.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeServiceAction>`_
**Request Syntax**
::
response = client.describe_service_action(
Id='string',
AcceptLanguage='string'
)
**Response Syntax**
::
{
'ServiceActionDetail': {
'ServiceActionSummary': {
'Id': 'string',
'Name': 'string',
'Description': 'string',
'DefinitionType': 'SSM_AUTOMATION'
},
'Definition': {
'string': 'string'
}
}
}
**Response Structure**
- *(dict) --*
- **ServiceActionDetail** *(dict) --*
Detailed information about the self-service action.
- **ServiceActionSummary** *(dict) --*
Summary information about the self-service action.
- **Id** *(string) --*
The self-service action identifier.
- **Name** *(string) --*
The self-service action name.
- **Description** *(string) --*
The self-service action description.
- **DefinitionType** *(string) --*
The self-service action definition type. For example, ``SSM_AUTOMATION`` .
- **Definition** *(dict) --*
A map that defines the self-service action.
- *(string) --*
- *(string) --*
:type Id: string
:param Id: **[REQUIRED]**
The self-service action identifier.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:rtype: dict
:returns:
"""
pass
def describe_tag_option(self, Id: str) -> Dict:
"""
Gets information about the specified TagOption.
See also: `AWS API Documentation | |
{
'type': file,
'description': 'The optional parent diff to upload.',
},
}
)
    def create(self, request, *args, **kwargs):
        """Creates a new diff by parsing an uploaded diff file.
        This will implicitly create the new Review Request draft, which can
        be updated separately and then published.
        This accepts a unified diff file, validates it, and stores it along
        with the draft of a review request. The new diff will have a revision
        of 0.
        A parent diff can be uploaded along with the main diff. A parent diff
        is a diff based on an existing commit in the repository, which will
        be applied before the main diff. The parent diff will not be included
        in the diff viewer. It's useful when developing a change based on a
        branch that is not yet committed. In this case, a parent diff of the
        parent branch would be provided along with the diff of the new commit,
        and only the new commit will be shown.
        It is expected that the client will send the data as part of a
        :mimetype:`multipart/form-data` mimetype. The main diff's name and
        content would be stored in the ``path`` field. If a parent diff is
        provided, its name and content would be stored in the
        ``parent_diff_path`` field.
        An example of this would be::
            -- SoMe BoUnDaRy
            Content-Disposition: form-data; name=path; filename="foo.diff"
            <Unified Diff Content Here>
            -- SoMe BoUnDaRy --
        """
        # NOTE: this file uses Python 2 "except X, e" syntax throughout.
        # Resolve the review request the new diff is being attached to.
        try:
            review_request = \
                review_request_resource.get_object(request, *args, **kwargs)
        except ReviewRequest.DoesNotExist:
            return DOES_NOT_EXIST
        if not review_request.is_mutable_by(request.user):
            return _no_access_error(request.user)
        # Validate the multipart form (diff file plus optional parent diff).
        form_data = request.POST.copy()
        form = UploadDiffForm(review_request, form_data, request.FILES)
        if not form.is_valid():
            return INVALID_FORM_DATA, {
                'fields': _get_form_errors(form),
            }
        try:
            diffset = form.create(request.FILES['path'],
                                  request.FILES.get('parent_diff_path'))
        except FileNotFoundError, e:
            # A file referenced by the diff does not exist in the repository.
            return REPO_FILE_NOT_FOUND, {
                'file': e.path,
                'revision': e.revision
            }
        except EmptyDiffError, e:
            return DIFF_EMPTY
        except DiffTooBigError, e:
            return DIFF_TOO_BIG, {
                'reason': str(e),
                'max_size': MAX_DIFF_SIZE,
            }
        except Exception, e:
            # This could be very wrong, but at least they'll see the error.
            # We probably want a new error type for this.
            logging.error("Error uploading new diff: %s", e, exc_info=1)
            return INVALID_FORM_DATA, {
                'fields': {
                    'path': [str(e)]
                }
            }
        # Remember any diffset already attached to an existing draft so it
        # can be deleted once the new one has been saved successfully.
        discarded_diffset = None
        try:
            draft = review_request.draft.get()
            if draft.diffset and draft.diffset != diffset:
                discarded_diffset = draft.diffset
        except ReviewRequestDraft.DoesNotExist:
            # No draft yet; create one for this review request.
            try:
                draft = ReviewRequestDraftResource.prepare_draft(
                    request, review_request)
            except PermissionDenied:
                return _no_access_error(request.user)
        draft.diffset = diffset
        # We only want to add default reviewers the first time.  Was bug 318.
        if review_request.diffset_history.diffsets.count() == 0:
            draft.add_default_reviewers();
        draft.save()
        if discarded_diffset:
            discarded_diffset.delete()
        # E-mail gets sent when the draft is saved.
        return 201, {
            self.item_result_key: diffset,
        }
# Module-level instance; other resources reference these singletons directly
# (see review_request_resource usage above).
diffset_resource = DiffResource()
class BaseWatchedObjectResource(WebAPIResource):
    """A base resource for objects watched by a user.

    Subclasses must set ``watched_resource``, ``profile_field``,
    ``star_function`` and ``unstar_function`` to describe which resource is
    being watched and which Profile methods star/unstar it.
    """
    watched_resource = None  # resource class/instance being watched
    uri_object_key = 'watched_obj_id'
    profile_field = None  # name of the Profile field storing the stars
    star_function = None  # name of the Profile method that stars an object
    unstar_function = None  # name of the Profile method that unstars it
    allowed_methods = ('GET', 'POST', 'DELETE')
    @property
    def uri_object_key_regex(self):
        # Delegate to the watched resource so URIs accept the same IDs.
        return self.watched_resource.uri_object_key_regex
    def get_queryset(self, request, username, local_site_name=None,
                     *args, **kwargs):
        """Return the watched resource's queryset filtered to the objects
        starred by the named user's profile.

        Returns an empty queryset if the user has no Profile.
        """
        try:
            local_site = _get_local_site(local_site_name)
            if local_site:
                user = local_site.users.get(username=username)
                profile = user.get_profile()
            else:
                profile = Profile.objects.get(user__username=username)
            q = self.watched_resource.get_queryset(
                request, local_site_name=local_site_name, *args, **kwargs)
            q = q.filter(starred_by=profile)
            return q
        except Profile.DoesNotExist:
            return self.watched_resource.model.objects.none()
    @webapi_check_login_required
    def get(self, request, watched_obj_id, *args, **kwargs):
        """Redirect (302) to the watched object's own resource URL."""
        try:
            q = self.get_queryset(request, *args, **kwargs)
            obj = q.get(pk=watched_obj_id)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        return HttpResponseRedirect(
            self.watched_resource.get_href(obj, request, *args, **kwargs))
    @webapi_check_login_required
    @webapi_response_errors(DOES_NOT_EXIST)
    def get_list(self, request, *args, **kwargs):
        """Return the serialized list of objects watched by the user."""
        # TODO: Handle pagination and ?counts-only=1
        try:
            objects = [
                self.serialize_object(obj)
                for obj in self.get_queryset(request, is_list=True, *args, **kwargs)
            ]
            return 200, {
                self.list_result_key: objects,
            }
        except User.DoesNotExist:
            return DOES_NOT_EXIST
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(required={
        'object_id': {
            'type': str,
            'description': 'The ID of the object to watch.',
        },
    })
    def create(self, request, object_id, *args, **kwargs):
        """Star (watch) the object identified by ``object_id`` for the
        requesting user, creating their Profile if needed."""
        try:
            obj_kwargs = kwargs.copy()
            obj_kwargs[self.watched_resource.uri_object_key] = object_id
            obj = self.watched_resource.get_object(request, *args, **obj_kwargs)
            user = user_resource.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        if not user_resource.has_modify_permissions(request, user,
                                                    *args, **kwargs):
            return _no_access_error(request.user)
        profile, profile_is_new = \
            Profile.objects.get_or_create(user=request.user)
        # Resolve the subclass-specific star method by name and invoke it.
        star = getattr(profile, self.star_function)
        star(obj)
        return 201, {
            self.item_result_key: obj,
        }
    @webapi_check_local_site
    @webapi_login_required
    def delete(self, request, watched_obj_id, *args, **kwargs):
        """Unstar (stop watching) the object. The watched object itself is
        never deleted; only the watch entry is removed."""
        try:
            obj_kwargs = kwargs.copy()
            obj_kwargs[self.watched_resource.uri_object_key] = watched_obj_id
            obj = self.watched_resource.get_object(request, *args, **obj_kwargs)
            user = user_resource.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        if not user_resource.has_modify_permissions(request, user,
                                                    *args, **kwargs):
            return _no_access_error(request.user)
        profile, profile_is_new = \
            Profile.objects.get_or_create(user=request.user)
        # A brand-new profile cannot have starred anything yet; skip unstar.
        if not profile_is_new:
            unstar = getattr(profile, self.unstar_function)
            unstar(obj)
        return 204, {}
    def serialize_object(self, obj, *args, **kwargs):
        """Serialize one watch entry: the watched object's ID plus the
        object itself under the subclass's item key."""
        return {
            'id': obj.pk,
            self.item_result_key: obj,
        }
class WatchedReviewGroupResource(BaseWatchedObjectResource):
    """Lists and manipulates entries for review groups watched by the user.
    These are groups that the user has starred in their Dashboard.
    This resource can be used for listing existing review groups and adding
    new review groups to watch.
    Each item in the resource is an association between the user and the
    review group. The entries in the list are not the review groups themselves,
    but rather an entry that represents this association by listing the
    association's ID (which can be used for removing the association) and
    linking to the review group.
    """
    name = 'watched_review_group'
    uri_name = 'review-groups'
    profile_field = 'starred_groups'
    star_function = 'star_review_group'
    unstar_function = 'unstar_review_group'
    @property
    def watched_resource(self):
        """Return the watched resource.
        This is implemented as a property in order to work around
        a circular reference issue.
        """
        return review_group_resource
    @webapi_check_local_site
    @augment_method_from(BaseWatchedObjectResource)
    def get(self, *args, **kwargs):
        """Returns an :http:`302` pointing to the review group being
        watched.
        Rather than returning a body with the entry, performing an HTTP GET
        on this resource will redirect the client to the actual review group
        being watched.
        Clients must properly handle :http:`302` and expect this redirect
        to happen.
        """
        # Behavior is inherited via augment_method_from; body intentionally empty.
        pass
    @webapi_check_local_site
    @augment_method_from(BaseWatchedObjectResource)
    def get_list(self, *args, **kwargs):
        """Retrieves the list of watched review groups.
        Each entry in the list consists of a numeric ID that represents the
        entry for the watched review group. This is not necessarily the ID
        of the review group itself. It's used for looking up the resource
        of the watched item so that it can be removed.
        """
        pass
    @webapi_check_local_site
    @augment_method_from(BaseWatchedObjectResource)
    def create(self, *args, **kwargs):
        """Marks a review group as being watched.
        The ID of the review group must be passed as ``object_id``, and will
        store that review group in the list.
        """
        pass
    @webapi_check_local_site
    @augment_method_from(BaseWatchedObjectResource)
    def delete(self, *args, **kwargs):
        """Deletes a watched review group entry.
        This is the same effect as unstarring a review group. It does
        not actually delete the review group, just the entry in the list.
        """
        pass
# Module-level instance used when wiring this resource into the API tree.
watched_review_group_resource = WatchedReviewGroupResource()
class WatchedReviewRequestResource(BaseWatchedObjectResource):
"""Lists and manipulates entries for review requests watched by the user.
These are requests that the user has starred in their Dashboard.
This resource can be used for listing existing review requests and adding
new review requests to watch.
Each item in the resource is an association between the user and the
review request. The entries in the list are not the review requests
themselves, but rather an entry that represents this association by
listing the association's ID (which can be used for removing the
association) and linking to the review request.
"""
name = 'watched_review_request'
uri_name = 'review-requests'
profile_field = 'starred_review_requests'
star_function = 'star_review_request'
unstar_function = 'unstar_review_request'
@property
def watched_resource(self):
"""Return the watched resource.
This is implemented as a property in order to work around
a circular reference issue.
"""
return review_request_resource
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get(self, *args, **kwargs):
"""Returned an :http:`302` pointing to the review request being
watched.
Rather than returning a body with the entry, performing an HTTP GET
on this resource will redirect the client to the actual review request
being watched.
Clients must properly handle :http:`302` and expect this redirect
to happen.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of watched review requests.
Each entry in the list consists of a numeric ID that represents the
entry for the watched review request. This is not necessarily the ID
of the review request itself. It's used for looking up the resource
of the watched item so that it can be removed.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def create(self, *args, **kwargs):
"""Marks a review request as being watched.
The ID of the review group must be passed as ``object_id``, | |
"""
GT Utilities
@<NAME> - <EMAIL> - 2020-09-13
Functions were named with a "gtu" (GT Utilities) prefix to avoid conflicts.
1.1 - 2020-10-17
Added move pivot to bottom/top
Added copy/paste material
Added move to origin
1.2 - 2020-10-21
Updated reset transform to better handle translate
Added Uniform LRA Toggle
Changed the order of the functions to match the menu
1.3 - 2020-11-11
    Updated "gtu_import_references" to better handle unloaded references
Added "gtu_remove_references"
Added "gtu_combine_curves"
Added "gtu_separate_curves"
1.4 - 2020-11-13
Updated combine and separate functions to work with bezier curves
1.5 - 2020-11-14
Added "gtu_convert_bif_to_mesh"
1.6 - 2020-11-16
Added "gtu_delete_nucleus_nodes"
Updated "gtu_delete_display_layers" to have inView feedback
Updated "gtu_delete_keyframes" to have inView feedback
1.7 - 2020-11-22
Updated text for the about window
1.8 - 2020-12-03
Changed the background color for the title in the "About" window
Changed the order of a few functions
Added function to unlock/unhide default channels
1.9 - 2021-01-05
Added Uniform Joint Label Toggle
To Do:
Add proper error handling to all functions.
New functions:
Reset Display Type and Color
Find/Rename non-unique names - Enforce unique names
Remove Custom Colors - select object types, outliner or viewport - colorPickCursor.png - use string to determine a list of types
    Assign lambert to everything function (Maybe assign to objects missing shaders)
Add Unlock all attributes
Add unhide attributes (provide list?)
Add Remove pasted_ function
Add assign checkboard function (already in bonus tools > rendering)
Force focus (focus without looking at children)
Brute force clean models (export OBJ and reimport)
New options:
Import all references : Add function to use a string to ignore certain references
Reset Transforms : Add reset only translate, rotate or scale
Delete all keyframes : Include option to delete or not set driven keys
Reset persp camera : Reset all other attributes too (including transform?)
Delete Display Layers : only empty? ignore string?
Delete Namespaces : only empty? ignore string?
"""
import maya.cmds as cmds
import maya.mel as mel
from maya import OpenMayaUI as omui
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
# Script Version
gtu_script_version = "1.9"
''' ____________________________ General Functions ____________________________'''
def gtu_reload_file():
    ''' Reopens the opened file (to revert back any changes done to the file) '''
    # A scene that was never saved has nothing on disk to reload.
    if not cmds.file(query=True, exists=True):
        cmds.warning('File was never saved.')
        return
    file_path = cmds.file(query=True, expandName=True)
    if file_path is not None:
        cmds.file(file_path, open=True, force=True)
def gtu_open_resource_browser():
    ''' Opens Maya's Resource Browser.

    Best-effort: failures (e.g. the module being unavailable in this Maya
    version) no longer pass silently - the user gets a warning instead.
    '''
    try:
        import maya.app.general.resourceBrowser as resourceBrowser
        resourceBrowser.resourceBrowser().run()
    except Exception as e:
        # Was a bare "except: pass", which hid import/runtime failures
        # entirely; keep the best-effort behaviour but surface the reason.
        cmds.warning('Unable to open the Resource Browser: ' + str(e))
def gtu_unlock_default_channels():
    ''' Unlocks Translate, Rotate, Scale for the selected objects '''
    function_name = 'GTU Unlock Default Channels'
    errors = ''
    cmds.undoInfo(openChunk=True, chunkName=function_name)  # Start undo chunk
    selection = cmds.ls(selection=True, long=True)
    unlocked_counter = 0
    # Default transform channels plus visibility.
    default_channels = ('translateX', 'translateY', 'translateZ',
                        'rotateX', 'rotateY', 'rotateZ',
                        'scaleX', 'scaleY', 'scaleZ', 'v')
    try:
        for obj in selection:
            try:
                for channel in default_channels:
                    cmds.setAttr(obj + '.' + channel, lock=False)
                unlocked_counter += 1
            except Exception as e:
                errors += str(e) + '\n'
        if errors != '':
            print('#### Errors: ####')
            print(errors)
            cmds.warning('Some channels were not unlocked . Open the script editor for a list of errors.')
    except:
        pass
    finally:
        cmds.undoInfo(closeChunk=True, chunkName=function_name)
    message = '<span style=\"color:#FF0000;text-decoration:underline;\">' + str(unlocked_counter) + ' </span>'
    is_plural = 'object had its' if unlocked_counter == 1 else 'objects had their'
    message += is_plural + ' default channels unlocked.'
    cmds.inViewMessage(amg=message, pos='botLeft', fade=True, alpha=.9)
def gtu_unhide_default_channels():
    ''' Unhides Translate, Rotate, Scale for the selected objects '''
    function_name = 'GTU Unhide Default Channels'
    errors = ''
    cmds.undoInfo(openChunk=True, chunkName=function_name)  # Start undo chunk
    selection = cmds.ls(selection=True, long=True)
    unlocked_counter = 0
    # Default transform channels plus visibility.
    default_channels = ('translateX', 'translateY', 'translateZ',
                        'rotateX', 'rotateY', 'rotateZ',
                        'scaleX', 'scaleY', 'scaleZ', 'v')
    try:
        for obj in selection:
            try:
                for channel in default_channels:
                    cmds.setAttr(obj + '.' + channel, keyable=True)
                unlocked_counter += 1
            except Exception as e:
                errors += str(e) + '\n'
        if errors != '':
            print('#### Errors: ####')
            print(errors)
            cmds.warning('Some channels were not made visible. Open the script editor for a list of errors.')
    except:
        pass
    finally:
        cmds.undoInfo(closeChunk=True, chunkName=function_name)
    message = '<span style=\"color:#FF0000;text-decoration:underline;\">' + str(unlocked_counter) + ' </span>'
    is_plural = 'object had its' if unlocked_counter == 1 else 'objects had their'
    message += is_plural + ' default channels made visible.'
    cmds.inViewMessage(amg=message, pos='botLeft', fade=True, alpha=.9)
def gtu_uniform_lra_toggle():
    '''
    Makes the visibility of the Local Rotation Axis uniform among
    the selected objects according to the current state of the majority of them.
    If all objects already share the same state, the state is toggled instead.
    '''
    function_name = 'GTU Uniform LRA Toggle'
    cmds.undoInfo(openChunk=True, chunkName=function_name)
    try:
        errors = ''

        def _apply_lra_state(objects, state):
            # Sets ".displayLocalAxis" on every object, collecting failures.
            collected = ''
            for target in objects:
                try:
                    cmds.setAttr(target + '.displayLocalAxis', state)
                except Exception as e:
                    collected += str(e) + '\n'
            return collected

        # Split the selection by current LRA visibility.
        inactive_lra = []
        active_lra = []
        for obj in cmds.ls(selection=True):
            try:
                if cmds.getAttr(obj + '.displayLocalAxis'):
                    active_lra.append(obj)
                else:
                    inactive_lra.append(obj)
            except Exception as e:
                errors += str(e) + '\n'
        if len(active_lra) == 0:
            errors += _apply_lra_state(inactive_lra, 1)  # all hidden -> show all
        elif len(inactive_lra) == 0:
            errors += _apply_lra_state(active_lra, 0)  # all shown -> hide all
        elif len(active_lra) > len(inactive_lra):
            errors += _apply_lra_state(inactive_lra, 1)  # majority shown -> conform
        else:
            errors += _apply_lra_state(active_lra, 0)  # majority hidden (or tie)
        if errors != '':
            print('#### Errors: ####')
            print(errors)
            cmds.warning('The script couldn\'t read or write some LRA states. Open script editor for more info.')
    except:
        pass
    finally:
        cmds.undoInfo(closeChunk=True, chunkName=function_name)
def gtu_uniform_jnt_label_toggle():
    '''
    Makes the visibility of the Joint Labels uniform according to the current state of the majority of them.
    If all joints already share the same state, the state is toggled instead.
    '''
    function_name = 'GTU Uniform Joint Label Toggle'
    cmds.undoInfo(openChunk=True, chunkName=function_name)
    try:
        errors = ''

        def _apply_label_state(objects, state):
            # Sets ".drawLabel" on every joint, collecting failures.
            collected = ''
            for target in objects:
                try:
                    cmds.setAttr(target + '.drawLabel', state)
                except Exception as e:
                    collected += str(e) + '\n'
            return collected

        # Split every joint in the scene by current label visibility.
        inactive_label = []
        active_label = []
        for obj in cmds.ls(type='joint', long=True):
            try:
                if cmds.getAttr(obj + '.drawLabel'):
                    active_label.append(obj)
                else:
                    inactive_label.append(obj)
            except Exception as e:
                errors += str(e) + '\n'
        if len(active_label) == 0:
            errors += _apply_label_state(inactive_label, 1)  # all off -> all on
        elif len(inactive_label) == 0:
            errors += _apply_label_state(active_label, 0)  # all on -> all off
        elif len(active_label) > len(inactive_label):
            errors += _apply_label_state(inactive_label, 1)  # majority on -> conform
        else:
            errors += _apply_label_state(active_label, 0)  # majority off (or tie)
        if errors != '':
            print('#### Errors: ####')
            print(errors)
            cmds.warning('The script couldn\'t read or write some "drawLabel" states. Open script editor for more info.')
    except:
        pass
    finally:
        cmds.undoInfo(closeChunk=True, chunkName=function_name)
def gtu_import_references():
    ''' Imports all references.

    Failures on individual references are collected and reported at the end
    instead of aborting the whole loop.
    '''
    errors = ''
    try:
        refs = cmds.ls(rf=True)
        for i in refs:
            # Fall back to the reference node name so the error report below
            # never hits an unbound name: previously "r_file" was only bound
            # inside the inner try, so a failing referenceQuery raised a
            # NameError in the handler and masked the real error.
            r_file = str(i)
            try:
                r_file = cmds.referenceQuery(i, f=True)
                cmds.file(r_file, importReference=True)
            except Exception as e:
                errors += str(e) + '(' + r_file + ')\n'
    except Exception:
        # Narrowed from a bare "except:"; keeps the best-effort behaviour.
        cmds.warning("Something went wrong. Maybe you don't have any references to import?")
    if errors != '':
        cmds.warning('Not all references were imported. Open the script editor for more information.')
        print(('#' * 50) + '\n')
        print(errors)
        print('#' * 50)
def gtu_remove_references():
''' Removes all references '''
try:
errors = ''
refs = cmds.ls(rf=True)
for i in refs:
try:
r_file = cmds.referenceQuery(i, f=True)
cmds.file(r_file, removeReference=True)
except Exception as e:
errors += str(e) + '(' + r_file + ')\n'
except:
cmds.warning("Something went wrong. Maybe you don't have any references to import?")
if errors != '':
cmds.warning('Not all references were removed. Open the script editor for more information.')
print(('#' * 50) + '\n')
print(errors)
print('#' * | |
import os
import sys
import traceback
from typing import Callable, Generator, List, Tuple
import pandas as pd
import seaborn as sns
from PySide2.QtCore import QEvent, QObject, QRunnable, QThreadPool, Qt, Signal, Slot
from PySide2.QtGui import QIcon, QPixmap
from PySide2.QtWidgets import (QApplication, QCheckBox, QComboBox, QDialog, QFileDialog, QFormLayout, QFrame, QLabel,
QLineEdit, QMainWindow, QMessageBox, QPushButton, QSizePolicy, QVBoxLayout, QWidget)
from matplotlib import use as set_backend
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavBar
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from xlrd import XLRDError
from src.utils import get_project_root
# Route matplotlib through the Qt5Agg backend so figures can be embedded in
# the PySide2 widgets below (FigureCanvasQTAgg / NavigationToolbar2QT).
set_backend('Qt5Agg')
# Global seaborn theme applied to every plot produced by this module.
sns.set(style="whitegrid")
class ViolinGUI(QMainWindow):
"""Main Window Widget for ViolinGUI."""
style = {
'title': 'QLabel {font-size: 18pt; font-weight: 600}',
'header': 'QLabel {font-size: 12pt; font-weight: 520}',
'label': 'QLabel {font-size: 10pt}',
'button': 'QPushButton {font-size: 10pt}',
'run button': 'QPushButton {font-size: 18pt; font-weight: 600}',
'line edit': 'QLineEdit {font-size: 10pt}',
'checkbox': 'QCheckBox {font-size: 10pt}',
'drop down': 'QComboBox {font-size: 10pt}'
}
    def __init__(self) -> None:
        """ViolinGUI Constructor. Defines all aspects of the GUI."""
        # ## Setup section
        # Inherits from QMainWindow
        super().__init__()
        self.rootdir = get_project_root()
        # QMainWindow basic properties
        self.setWindowTitle("SCOUTS - Violins")
        self.setWindowIcon(QIcon(os.path.abspath(os.path.join(self.rootdir, 'src', 'scouts.ico'))))
        # Creates QWidget as QMainWindow's central widget
        self.page = QWidget(self)
        self.setCentralWidget(self.page)
        # Miscellaneous initialization values
        self.threadpool = QThreadPool()  # Threadpool for workers
        self.population_df = None  # DataFrame of whole population (raw data)
        self.summary_df = None  # DataFrame indicating which SCOUTS output corresponds to which rule
        self.summary_path = None  # path to all DataFrames generated by SCOUTS
        self.main_layout = QVBoxLayout(self.page)
        # Title section
        # Title
        self.title = QLabel(self.page)
        self.title.setText('SCOUTS - Violins')
        self.title.setStyleSheet(self.style['title'])
        self.title.adjustSize()
        self.main_layout.addWidget(self.title)
        # ## Input section
        # Input header
        self.input_header = QLabel(self.page)
        self.input_header.setText('Load data')
        self.input_header.setStyleSheet(self.style['header'])
        self.input_header.adjustSize()
        self.main_layout.addWidget(self.input_header)
        # Input/Output frame
        self.input_frame = QFrame(self.page)
        self.input_frame.setFrameShape(QFrame.StyledPanel)
        self.input_frame.setLayout(QFormLayout())
        self.main_layout.addWidget(self.input_frame)
        # Raw data button
        # objectName 'file' is how get_path() distinguishes this sender.
        self.input_button = QPushButton(self.page)
        self.input_button.setStyleSheet(self.style['button'])
        self.set_icon(self.input_button, 'x-office-spreadsheet')
        self.input_button.setObjectName('file')
        self.input_button.setText(' Load raw data file')
        self.input_button.setToolTip('Load raw data file (the file given to SCOUTS as the input file)')
        self.input_button.clicked.connect(self.get_path)
        # SCOUTS results button
        # objectName 'folder' makes get_path() open a directory dialog instead.
        self.output_button = QPushButton(self.page)
        self.output_button.setStyleSheet(self.style['button'])
        self.set_icon(self.output_button, 'folder')
        self.output_button.setObjectName('folder')
        self.output_button.setText(' Load SCOUTS results')
        self.output_button.setToolTip('Load data from SCOUTS analysis '
                                      '(the folder given to SCOUTS as the output folder)')
        self.output_button.clicked.connect(self.get_path)
        # Add widgets above to input frame Layout
        self.input_frame.layout().addRow(self.input_button)
        self.input_frame.layout().addRow(self.output_button)
        # ## Samples section
        # Samples header
        self.samples_header = QLabel(self.page)
        self.samples_header.setText('Select sample names')
        self.samples_header.setStyleSheet(self.style['header'])
        self.samples_header.adjustSize()
        self.main_layout.addWidget(self.samples_header)
        # Samples frame
        self.samples_frame = QFrame(self.page)
        self.samples_frame.setFrameShape(QFrame.StyledPanel)
        self.samples_frame.setLayout(QFormLayout())
        self.main_layout.addWidget(self.samples_frame)
        # Samples label
        self.samples_label = QLabel(self.page)
        self.samples_label.setText('Write sample names delimited by semicolons below.\nEx: Control;Treat_01;Pac-03')
        self.samples_label.setStyleSheet(self.style['label'])
        # Sample names line edit
        self.sample_names = QLineEdit(self.page)
        self.sample_names.setStyleSheet(self.style['line edit'])
        # Add widgets above to samples frame Layout
        self.samples_frame.layout().addRow(self.samples_label)
        self.samples_frame.layout().addRow(self.sample_names)
        # ## Analysis section
        # Analysis header
        self.analysis_header = QLabel(self.page)
        self.analysis_header.setText('Plot parameters')
        self.analysis_header.setStyleSheet(self.style['header'])
        self.analysis_header.adjustSize()
        self.main_layout.addWidget(self.analysis_header)
        # Analysis frame
        self.analysis_frame = QFrame(self.page)
        self.analysis_frame.setFrameShape(QFrame.StyledPanel)
        self.analysis_frame.setLayout(QFormLayout())
        self.main_layout.addWidget(self.analysis_frame)
        # Analysis labels
        self.analysis_label_01 = QLabel(self.page)
        self.analysis_label_01.setText('Compare')
        self.analysis_label_01.setStyleSheet(self.style['label'])
        self.analysis_label_02 = QLabel(self.page)
        self.analysis_label_02.setText('with')
        self.analysis_label_02.setStyleSheet(self.style['label'])
        self.analysis_label_03 = QLabel(self.page)
        self.analysis_label_03.setText('for marker')
        self.analysis_label_03.setStyleSheet(self.style['label'])
        self.analysis_label_04 = QLabel(self.page)
        self.analysis_label_04.setText('Outlier type')
        self.analysis_label_04.setStyleSheet(self.style['label'])
        # Analysis drop-down boxes
        self.drop_down_01 = QComboBox(self.page)
        self.drop_down_01.addItems(['whole population', 'non-outliers', 'top outliers', 'bottom outliers', 'none'])
        self.drop_down_01.setStyleSheet(self.style['drop down'])
        self.drop_down_01.setCurrentIndex(2)
        self.drop_down_02 = QComboBox(self.page)
        self.drop_down_02.addItems(['whole population', 'non-outliers', 'top outliers', 'bottom outliers', 'none'])
        self.drop_down_02.setStyleSheet(self.style['drop down'])
        self.drop_down_02.setCurrentIndex(0)
        # NOTE(review): no items added to drop_down_03 here; presumably the
        # marker list is populated after the input data loads - confirm.
        self.drop_down_03 = QComboBox(self.page)
        self.drop_down_03.setStyleSheet(self.style['drop down'])
        self.drop_down_04 = QComboBox(self.page)
        self.drop_down_04.addItems(['OutS', 'OutR'])
        self.drop_down_04.setStyleSheet(self.style['drop down'])
        # Add widgets above to samples frame Layout
        self.analysis_frame.layout().addRow(self.analysis_label_01, self.drop_down_01)
        self.analysis_frame.layout().addRow(self.analysis_label_02, self.drop_down_02)
        self.analysis_frame.layout().addRow(self.analysis_label_03, self.drop_down_03)
        self.analysis_frame.layout().addRow(self.analysis_label_04, self.drop_down_04)
        self.legend_checkbox = QCheckBox(self.page)
        self.legend_checkbox.setText('Add legend to the plot')
        self.legend_checkbox.setStyleSheet(self.style['checkbox'])
        self.main_layout.addWidget(self.legend_checkbox)
        # Plot button (stand-alone)
        # Disabled until input data finishes loading (re-enabled via enable_plot,
        # wired up in load_data).
        self.plot_button = QPushButton(self.page)
        self.set_icon(self.plot_button, 'system-run')
        self.plot_button.setText(' Plot')
        self.plot_button.setToolTip('Plot data after loading the input data and selecting parameters')
        self.plot_button.setStyleSheet(self.style['run button'])
        self.plot_button.setEnabled(False)
        self.plot_button.clicked.connect(self.run_plot)
        # ## Secondary Window
        # This is used to plot the violins only
        self.secondary_window = QMainWindow(self)
        self.secondary_window.resize(720, 720)
        self.dynamic_canvas = DynamicCanvas(self.secondary_window, width=6, height=6, dpi=120)
        self.secondary_window.setCentralWidget(self.dynamic_canvas)
        self.secondary_window.addToolBar(NavBar(self.dynamic_canvas, self.secondary_window))
def set_icon(self, widget: QWidget, icon: str) -> None:
    """Attach a themed icon to *widget*, falling back to the bundled SVG file."""
    fallback = QIcon()
    svg_path = os.path.abspath(os.path.join(self.rootdir, 'src', 'default_icons', f'{icon}.svg'))
    fallback.addPixmap(QPixmap(svg_path))
    widget.setIcon(QIcon.fromTheme(icon, fallback))
def get_path(self) -> None:
    """Opens a dialog box and loads the corresponding data into memory, depending on the caller widget."""
    options = QFileDialog.Options()
    options |= QFileDialog.DontUseNativeDialog
    query = None
    func = None
    caller = self.sender().objectName()
    if caller == 'file':
        query, _ = QFileDialog.getOpenFileName(self, "Select file", "", "All Files (*)", options=options)
        func = self.load_scouts_input_data
    elif caller == 'folder':
        query = QFileDialog.getExistingDirectory(self, "Select Directory", options=options)
        func = self.load_scouts_results
    if query:
        self.load_data(query, func)
def load_data(self, query: str, func: Callable) -> None:
    """Runs *func* on *query* in a worker thread, showing a loading message meanwhile."""
    worker = Worker(func=func, query=query)
    message = self.loading_message()
    signals = worker.signals
    # Disable the page and show the message while loading; re-enable when done.
    signals.started.connect(message.show)
    signals.started.connect(self.page.setDisabled)
    signals.error.connect(self.generic_error_message)
    signals.error.connect(message.destroy)
    signals.failed.connect(self.plot_button.setDisabled)
    signals.success.connect(message.destroy)
    signals.success.connect(self.enable_plot)
    signals.finished.connect(self.page.setEnabled)
    self.threadpool.start(worker)
def loading_message(self) -> QDialog:
    """Builds the dialog displayed while the input data loads in the background."""
    dialog = QDialog(self)
    dialog.setWindowTitle('Loading')
    dialog.resize(300, 50)
    label = QLabel('loading DataFrame into memory...', dialog)
    label.setStyleSheet(self.style['label'])
    label.adjustSize()
    label.setAlignment(Qt.AlignCenter)
    # Center the label inside the dialog.
    x = int((dialog.width() - label.width()) / 2)
    y = int((dialog.height() - label.height()) / 2)
    label.move(x, y)
    return dialog
def load_scouts_input_data(self, query: str) -> None:
    """Loads data for whole population prior to SCOUTS into memory (used for plotting the whole population).

    Tries Excel first; on XLRDError falls back to CSV. Also refreshes the
    marker drop-down with the loaded columns.
    """
    try:
        self.population_df = pd.read_excel(query, index_col=0)
    except XLRDError:
        # Not a readable Excel file; assume CSV instead.
        self.population_df = pd.read_csv(query, index_col=0)
    self.drop_down_03.clear()
    self.drop_down_03.addItems(list(self.population_df.columns))
    self.drop_down_03.setCurrentIndex(0)
def load_scouts_results(self, query: str) -> None:
    """Loads the SCOUTS summary file into memory, in order to dynamically locate SCOUTS output files later when
    the user chooses which data to plot."""
    # 'summary.xlsx' is expected inside the selected SCOUTS output directory.
    self.summary_df = pd.read_excel(os.path.join(query, 'summary.xlsx'), index_col=None)
    self.summary_path = query
def enable_plot(self) -> None:
    """Enables the plot button once both input DataFrames are loaded in memory."""
    have_summary = isinstance(self.summary_df, pd.DataFrame)
    have_population = isinstance(self.population_df, pd.DataFrame)
    if have_summary and have_population:
        self.plot_button.setEnabled(True)
def run_plot(self) -> None:
    """Sets up the plot worker and hands it to the thread pool."""
    plot_worker = Worker(func=self.plot)
    plot_worker.signals.error.connect(self.generic_error_message)
    plot_worker.signals.success.connect(self.secondary_window.show)
    self.threadpool.start(plot_worker)
def plot(self) -> None:
    """Logic for plotting data based on user selection of populations, markers, etc.

    Fix: ``pandas.DataFrame.append`` was deprecated in pandas 1.4 and removed
    in 2.0. Partial results are now collected in a list and concatenated once
    with ``pd.concat``, which is also linear instead of quadratic.
    """
    # Clear figure currently on plot
    self.dynamic_canvas.axes.cla()
    # Initialize values and get parameters from GUI
    columns = ['sample', 'marker', 'population', 'expression']
    samples = self.parse_sample_names()
    pop_01 = self.drop_down_01.currentText()
    pop_02 = self.drop_down_02.currentText()
    pops_to_analyse = [pop_01, pop_02]
    marker = self.drop_down_03.currentText()
    cutoff_from_reference = self.drop_down_04.currentText() == 'OutR'
    # Seed with an empty frame so concat works even when nothing matches.
    partial_dfs = [pd.DataFrame(columns=columns)]
    # Start fetching data from files
    for pop in pops_to_analyse:
        if pop == 'whole population':
            partial_dfs.extend(self.yield_violin_values(
                df=self.population_df, population='whole population',
                samples=samples, marker=marker, columns=columns))
        # Other comparisons
        elif pop != 'none':
            for file_number in self.yield_selected_file_numbers(
                    summary_df=self.summary_df, population=pop,
                    cutoff_from_reference=cutoff_from_reference, marker=marker):
                df_path = os.path.join(self.summary_path, 'data', f'{"%04d" % file_number}.')
                try:
                    sample_df = pd.read_excel(df_path + 'xlsx', index_col=0)
                except FileNotFoundError:
                    sample_df = pd.read_csv(df_path + 'csv', index_col=0)
                if not sample_df.empty:
                    partial_dfs.extend(self.yield_violin_values(
                        df=sample_df, population=pop, samples=samples,
                        marker=marker, columns=columns))
    violin_df = pd.concat(partial_dfs)
    # Plot data
    pops_to_analyse = [p for p in pops_to_analyse if p != 'none']
    violin_df = violin_df[violin_df['marker'] == marker]
    for pop in pops_to_analyse:
        pop_subset = violin_df.loc[violin_df['population'] == pop]
        for sample in samples:
            sample_subset = pop_subset.loc[pop_subset['sample'] == sample]
            # Fade each successive sample's color for visual separation.
            sat = 1.0 - samples.index(sample) / (len(samples) + 1)
            self.dynamic_canvas.update_figure(subset_by_sample=sample_subset, pop=pop, sat=sat, samples=samples)
    # Draw plotted data on canvas
    if self.legend_checkbox.isChecked():
        self.dynamic_canvas.add_legend()
    self.dynamic_canvas.axes.set_title(f'{marker} expression - {self.drop_down_04.currentText()}')
    self.dynamic_canvas.fig.canvas.draw()
def parse_sample_names(self) -> List[str]:
    """Parse sample names from the QLineEdit Widget (semicolon-separated)."""
    return self.sample_names.text().split(';')
def generic_error_message(self, error: Tuple[Exception, str]) -> None:
    """Error message box used to display any error message (including traceback) for any uncaught errors."""
    exception, trace = error
    QMessageBox.critical(self, 'An error occurred!', f"Error: {str(exception)}\n\nfull traceback:\n{trace}")
def closeEvent(self, event: QEvent) -> None:
    """Defines the confirmation box shown when the user wants to quit ViolinGUI."""
    reply = QMessageBox.question(
        self, 'Quit Application', "Are you sure you want to quit?",
        QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if reply != QMessageBox.Yes:
        event.ignore()
        return
    self.setEnabled(False)
    # Let queued workers finish before the window is torn down.
    self.threadpool.waitForDone()
    event.accept()
@staticmethod
def yield_violin_values(df: pd.DataFrame, population: str, samples: List[str], marker: str,
columns: List[str]) -> pd.DataFrame:
"""Returns a DataFrame from expression values, along with information of sample, marker and population. This
DataFrame is appended to the violin plot DataFrame in order to simplify plotting the violins afterwards."""
for sample in samples:
series = df.loc[df.index.str.contains(sample)].loc[:, marker]
yield pd.DataFrame({'sample': sample, 'marker': marker, 'population': population, 'expression': series},
columns=columns)
@staticmethod
def yield_selected_file_numbers(summary_df: pd.DataFrame, population: str, cutoff_from_reference: bool,
marker: str) -> Generator[pd.DataFrame, None, None]:
"""Yields file numbers from DataFrames resulting from SCOUTS analysis. DataFrames are yielded based on
global values, i.e. the comparisons the user wants to perform."""
cutoff = 'sample'
if cutoff_from_reference is True:
cutoff = 'reference'
for index, (file_number, cutoff_from, reference, outliers_for, category) in summary_df.iterrows():
if cutoff_from == cutoff and outliers_for == marker and category == population:
yield file_number
class DynamicCanvas(FigureCanvas):
| |
<reponame>GPT-RL/generalization
import math
import warnings
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
from torch.nn.functional import _in_projection, linear
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter
from torch.overrides import handle_torch_function, has_torch_function
def _in_projection_packed(
q: Tensor,
k: Tensor,
v: Tensor,
w: Tensor,
b: Optional[Tensor] = None,
) -> List[Tensor]:
r"""
Performs the in-projection step of the attention operation, using packed weights.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected. For self-attention,
these are typically the same tensor; for encoder-decoder attention,
k and v are typically the same tensor. (We take advantage of these
identities for performance if they are present.) Regardless, q, k and v
must share a common embedding dimension; otherwise their shapes may vary.
w: projection weights for q, k and v, packed into a single tensor. Weights
are packed along dimension 0, in q, k, v order.
b: optional projection biases for q, k and v, packed into a single tensor
in q, k, v order.
Shape:
Inputs:
- q: :math:`(..., E)` where E is the embedding dimension
- k: :math:`(..., E)` where E is the embedding dimension
- v: :math:`(..., E)` where E is the embedding dimension
- w: :math:`(E * 3, E)` where E is the embedding dimension
- b: :math:`E * 3` where E is the embedding dimension
Output:
- in output list :math:`[q', k', v']`, each output tensor will have the
same shape as the corresponding input tensor.
"""
E = q.size(-1)
if k is v:
if q is k:
# self-attention
return linear(q, w, b).chunk(3, dim=-1)
else:
# encoder-decoder attention
w_q, w_kv = w.split([E, E * 2])
if b is None:
b_q = b_kv = None
else:
b_q, b_kv = b.split([E, E * 2])
return (linear(q, w_q, b_q),) + linear(k, w_kv, b_kv).chunk(2, dim=-1)
else:
w_q, w_k, w_v = w.chunk(3)
if b is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = b.chunk(3)
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""
Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Returns a tensor pair containing attended values and attention weights.
Args:
q, k, v: query, key and value tensors. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,
and E is embedding dimension.
- key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of
shape :math:`(Nt, Ns)`.
- Output: attention values have shape :math:`(B, Nt, E)`; attention weights
have shape :math:`(B, Nt, Ns)`
"""
B, Nt, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
attn = torch.bmm(q, k.transpose(-2, -1))
if attn_mask is not None:
attn += attn_mask
attn = F.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = F.dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tens_ops = (
query,
key,
value,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
out_proj_weight,
out_proj_bias,
)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
| |
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
    """Parse an xsd:date string (optionally suffixed 'Z' or +/-hh:mm) into a date."""
    tz = None
    if input_data[-1] == 'Z':
        # Trailing 'Z' means UTC.
        tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
        input_data = input_data[:-1]
    else:
        match = GeneratedsSuper.tzoff_pattern.search(input_data)
        if match is not None:
            hours_str, minutes_str = match.group(2).split(':')
            offset = int(hours_str) * 60 + int(minutes_str)
            if match.group(1) == '-':
                offset = -offset
            tz = GeneratedsSuper._FixedOffsetTZ(offset, match.group(0))
            # Strip the six-character offset suffix before parsing.
            input_data = input_data[:-6]
    parsed = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
    return parsed.replace(tzinfo=tz).date()
def gds_validate_time(self, input_data, node=None, input_name=''):
    """Validation hook for xsd:time values; returns the value unchanged."""
    return input_data
def gds_format_time(self, input_data, input_name=''):
    """Format a time (or datetime) value as an xsd:time string.

    Emits HH:MM:SS, appends fractional seconds when microsecond != 0, and a
    'Z' or +/-hh:mm suffix when the value carries a usable UTC offset.
    """
    if input_data.microsecond == 0:
        _svalue = '%02d:%02d:%02d' % (
            input_data.hour, input_data.minute, input_data.second)
    else:
        fraction = ('%f' % (float(input_data.microsecond) / 1000000))[2:]
        _svalue = '%02d:%02d:%02d.%s' % (
            input_data.hour, input_data.minute, input_data.second, fraction)
    if input_data.tzinfo is not None:
        tzoff = input_data.tzinfo.utcoffset(input_data)
        if tzoff is not None:
            total_seconds = tzoff.seconds + (86400 * tzoff.days)
            if total_seconds == 0:
                _svalue += 'Z'
            else:
                sign = '+'
                if total_seconds < 0:
                    sign = '-'
                    total_seconds = -total_seconds
                hours, remainder = divmod(total_seconds, 3600)
                _svalue += '{0}{1:02d}:{2:02d}'.format(sign, hours, remainder // 60)
    return _svalue
def gds_validate_simple_patterns(self, patterns, target):
    """Return True when *target* fully matches at least one pattern in every group.

    *patterns* is a list of pattern groups: within a group any full-length
    match suffices, and every group must be satisfied.
    """
    def full_match_(pattern):
        mo = re_.search(pattern, target)
        # A match counts only if it spans the whole target string.
        return mo is not None and len(mo.group(0)) == len(target)
    return all(any(full_match_(pattern) for pattern in group)
               for group in patterns)
@classmethod
def gds_parse_time(cls, input_data):
    """Parse an xsd:time string (optional fraction, 'Z' or +/-hh:mm suffix) into a time."""
    tz = None
    if input_data[-1] == 'Z':
        tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
        input_data = input_data[:-1]
    else:
        match = GeneratedsSuper.tzoff_pattern.search(input_data)
        if match is not None:
            hours_str, minutes_str = match.group(2).split(':')
            offset = int(hours_str) * 60 + int(minutes_str)
            if match.group(1) == '-':
                offset = -offset
            tz = GeneratedsSuper._FixedOffsetTZ(offset, match.group(0))
            # Strip the six-character offset suffix before parsing.
            input_data = input_data[:-6]
    # Fractional seconds require the '%f' directive.
    fmt = '%H:%M:%S.%f' if '.' in input_data else '%H:%M:%S'
    parsed = datetime_.datetime.strptime(input_data, fmt)
    return parsed.replace(tzinfo=tz).time()
def gds_check_cardinality_(
        self, value, input_name,
        min_occurs=0, max_occurs=1, required=None):
    """Record collector messages when *value*'s multiplicity violates the schema."""
    # A scalar counts as one occurrence; None as zero; a list as its length.
    if value is None:
        length = 0
    elif isinstance(value, list):
        length = len(value)
    else:
        length = 1
    if required is not None and required and length < 1:
        self.gds_collector_.add_message(
            "Required value {}{} is missing".format(
                input_name, self.gds_get_node_lineno_()))
    if length < min_occurs:
        self.gds_collector_.add_message(
            "Number of values for {}{} is below "
            "the minimum allowed, "
            "expected at least {}, found {}".format(
                input_name, self.gds_get_node_lineno_(),
                min_occurs, length))
    elif length > max_occurs:
        self.gds_collector_.add_message(
            "Number of values for {}{} is above "
            "the maximum allowed, "
            "expected at most {}, found {}".format(
                input_name, self.gds_get_node_lineno_(),
                max_occurs, length))
def gds_validate_builtin_ST_(
        self, validator, value, input_name,
        min_occurs=None, max_occurs=None, required=None):
    """Run a builtin simple-type validator, collecting any GDSParseError message."""
    if value is None:
        return
    try:
        validator(value, input_name=input_name)
    except GDSParseError as parse_error:
        self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
        self, validator, value, input_name,
        min_occurs=None, max_occurs=None, required=None):
    """Run a schema-defined simple-type validator, collecting any GDSParseError message."""
    if value is None:
        return
    try:
        validator(value)
    except GDSParseError as parse_error:
        self.gds_collector_.add_message(str(parse_error))
def gds_str_lower(self, instring):
    """Return *instring* lower-cased (export-formatting helper)."""
    return instring.lower()
def get_path_(self, node):
    """Return the slash-separated tag path from the document root down to *node*."""
    segments = []
    # get_path_list_ appends leaf-first, so the list must be reversed.
    self.get_path_list_(node, segments)
    return '/'.join(reversed(segments))
# Matches a leading '{namespace-uri}' prefix on an element tag.
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
    """Append the namespace-stripped tags of *node* and its ancestors to *path_list* (leaf first)."""
    if node is None:
        return
    tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
    if tag:
        path_list.append(tag)
    # node.getparent() is an lxml API; it returns None at the document root.
    self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
    """Resolve an xsi:type attribute on *node* to a class object, else *default_class*."""
    result = default_class
    if 'xsi' in node.nsmap:
        classname = node.get('{%s}type' % node.nsmap['xsi'])
        if classname is not None:
            # Strip a 'prefix:' qualifier if present.
            parts = classname.split(':')
            if len(parts) == 2:
                classname = parts[1]
            candidate = globals().get(classname)
            if candidate is not None:
                result = candidate
    return result
def gds_build_any(self, node, type_name=None):
    """Serialize *node* back to its XML text (used for xs:any content)."""
    # provide default value in case option --disable-xml is used.
    content = ""
    content = etree_.tostring(node, encoding="unicode")
    return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
    """Return *mapping* with keys and values swapped."""
    return {value: key for key, value in mapping.items()}
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
    """Return an XML-quoted form of *instring* (str, Python-2 unicode, or other)."""
    if isinstance(instring, str):
        return quote_xml(instring)
    # The 'unicode' name only exists (and is only evaluated) on Python 2.
    if sys.version_info.major == 2 and isinstance(instring, unicode):
        return quote_xml(instring).encode('utf8')
    return GeneratedsSuper.gds_encode(str(instring))
def __eq__(self, other):
    """Structural equality that ignores parent/collector bookkeeping attributes."""
    if type(self) != type(other):
        return False
    def relevant_items_(obj):
        return [item for item in obj.__dict__.items()
                if item[0] not in ('parent_object_', 'gds_collector_')]
    return relevant_items_(self) == relevant_items_(other)
def __ne__(self, other):
    """Inverse of __eq__."""
    return not self.__eq__(other)
# Django ETL transform hooks.
def gds_djo_etl_transform(self):
    # No-op hook; generated subclasses may override to transform this object.
    pass
def gds_djo_etl_transform_db_obj(self, dbobj):
    # No-op hook; override to transform the Django DB object built from self.
    pass
# SQLAlchemy ETL transform hooks.
def gds_sqa_etl_transform(self):
    # No-op hook; override to transform. Returns a (status, object) pair.
    return 0, None
def gds_sqa_etl_transform_db_obj(self, dbobj):
    # No-op hook; override to transform the SQLAlchemy DB object.
    pass
def gds_get_node_lineno_(self):
    """Return ' near line N' for error messages, or '' when no source node is attached."""
    node = getattr(self, "gds_elementtree_node_", None)
    if node is None:
        return ""
    # sourceline is an lxml element attribute.
    return ' near line {}'.format(node.sourceline)
def getSubclassFromModule_(module, class_):
    '''Get the subclass of a class from a specific module.

    By generateDS convention a subclass module names its override
    '<ClassName>Sub'; returns None when the module defines no such name.
    '''
    return getattr(module, class_.__name__ + 'Sub', None)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
#
# Globals
#
# Output encoding used by gds_encode on Python 2; empty string selects UTF-8.
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
# Splits a tag into its optional '{namespace}' prefix and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# Matches CDATA sections so quote_xml can copy them through unescaped.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
def showIndent(outfile, level, pretty_print=True):
    # Emit `level` levels of four-space indentation (no-op unless pretty-printing).
    if pretty_print:
        for idx in range(level):
            outfile.write('    ')
def quote_xml(inStr):
    "Escape markup chars, but do not modify CDATA sections."
    if not inStr:
        return ''
    text = inStr if isinstance(inStr, BaseStrType_) else '%s' % inStr
    out = ''
    pos = 0
    # Escape only the text between CDATA sections; copy the sections verbatim.
    for mo in CDATA_pattern_.finditer(text):
        out += quote_xml_aux(text[pos:mo.start()])
        out += text[mo.start():mo.end()]
        pos = mo.end()
    out += quote_xml_aux(text[pos:])
    return out
def quote_xml_aux(inStr):
    """Escape the XML special characters '&', '<' and '>' in *inStr*.

    NOTE(review): the incoming source showed each replacement target and
    replacement as identical (the entity references had been decoded, turning
    the calls into no-ops). Restored the standard generateDS escaping;
    '&' is replaced first so existing entities are not double-escaped.
    """
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
    """Render *inStr* as a Python string literal, choosing a quote style that
    needs minimal escaping; triple quotes are used for multi-line values."""
    text = inStr
    if "'" not in text:
        # Single quotes are safe as-is.
        return "'''%s'''" % text if '\n' in text else "'%s'" % text
    if '"' in text:
        # Switching to double quotes, so escape embedded double quotes.
        text = text.replace('"', '\\"')
    return '"""%s"""' % text if '\n' in text else '"%s"' % text
def get_all_text_(node):
    """Collect the text content of *node*: its own text plus every child's tail."""
    text = node.text if node.text is not None else ''
    for child in node:
        if child.tail is not None:
            text += child.tail
    return text
def find_attr_value_(attr_name, node):
    """Look up an attribute on *node*, resolving a 'prefix:name' form via node.nsmap."""
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, name = parts
        # nsmap (prefix -> namespace URI) is an lxml-only attribute.
        namespace = node.nsmap.get(prefix)
        if namespace is not None:
            return attrs.get('{%s}%s' % (namespace, name, ))
    return None
def encode_str_2_3(instr):
    """Identity on Python 3; retained for Python 2/3 source compatibility."""
    return instr
class GDSParseError(Exception):
    """Raised when parsing or validating schema-typed input data fails."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError, annotating *msg* with the offending element and line."""
    if node is not None:
        # node.sourceline is an lxml attribute — assumes lxml elements; TODO confirm.
        msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    raise GDSParseError(msg)
class MixedContainer:
    """Holds one piece of mixed XML content: text, a simple value, or a complex object."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
def __init__(self, category, content_type, name, value):
    # category: one of the Category* constants.
    self.category = category
    # content_type: one of the Type* constants.
    self.content_type = content_type
    # name: element name of this content item.
    self.name = name
    self.value = value
def getCategory(self):
    """Return the Category* constant for this content item."""
    return self.category
def getContenttype(self, content_type):
    # NOTE(review): the parameter is unused; the stored content_type is returned.
    return self.content_type
def getValue(self):
    """Return the wrapped value."""
    return self.value
def getName(self):
    """Return the element name of this content item."""
    return self.name
def export(self, outfile, level, name, namespace,
           pretty_print=True):
    """Write this content item to *outfile*, dispatching on its category."""
    if self.category == MixedContainer.CategoryText:
        # Prevent exporting empty content as empty lines.
        if self.value.strip():
            outfile.write(self.value)
    elif self.category == MixedContainer.CategorySimple:
        self.exportSimple(outfile, level, name)
    else: # category == MixedContainer.CategoryComplex
        self.value.export(
            outfile, level, namespace, name_=name,
            pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == | |
self.cursor.execute(
"DROP FUNCTION IF EXISTS l1sum (double precision, double precision);"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1sum (double precision, double precision) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', cast($2 AS text), ') - 1)')$$ LANGUAGE SQL;"
)
self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (text, double precision);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1sum (text, double precision) returns text AS $$select concat('log(exp(', $1, ') + exp(', cast($2 AS text), ') - 1)')$$ LANGUAGE SQL;"
)
self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (double precision, text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1sum (double precision, text) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', $2, ') - 1)')$$ LANGUAGE SQL;"
)
self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (text, text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1sum (text, text) returns text AS $$select concat('log(exp(', $1, ') + exp(', $2, ') - 1)')$$ LANGUAGE SQL;"
)
# Aggregate functions for Numeric Safe Sample
self.cursor.execute(
"CREATE OR REPLACE FUNCTION ior_sfunc_n (double precision, double precision) RETURNS double precision AS 'select max(val) from (VALUES($1 * (1.0 - $2)), (0.00001)) AS Vals(val)' LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION ior_finalfunc_n (double precision) RETURNS double precision AS 'select 1.0 - $1' LANGUAGE SQL;"
)
self.cursor.execute("DROP AGGREGATE IF EXISTS ior_n (double precision);")
self.cursor.execute(
"CREATE AGGREGATE ior_n (double precision) (sfunc = ior_sfunc_n, stype = double precision, finalfunc = ior_finalfunc_n, initcond = '1.0');"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l_ior_sfunc_n (double precision, double precision) RETURNS double precision AS 'select $1 + $2' LANGUAGE SQL;"
)
self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_n (double precision);")
self.cursor.execute(
"CREATE AGGREGATE l_ior_n (double precision) (sfunc = l_ior_sfunc_n, stype = double precision, initcond = '0.0');"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1prod_n (double precision, double precision) RETURNS double precision AS 'select case when $1 > -745 AND $2 > -745 then m + ln(exp($1-m) + exp($2-m) - exp($1+$2-m)) else m end from(select max(val) as m from (VALUES($1), ($2)) AS Vals(val)) as foo' LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1diff_n (double precision, double precision) RETURNS double precision AS 'select case when $1 >= -745 and $2 >= -745 and 1+exp($1)-exp($2) > 0 then ln(1 - exp($2) + exp($1)) when $1 >= -745 and $2 >= -745 and 1+exp($1)-exp($2) <= 0 then NULL when $1 >= -745 and $2 < -745 then ln(1+exp($1)) when $1 < -745 and $2 > 0 then NULL when $1 < -745 and $2 <= 0 and $2 >= -745 then ln(1-exp($2)) else 0 end' LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l1sum_n (double precision, double precision) RETURNS double precision AS 'select case when $1 >= -745 and $2 >= -745 and exp($1)+exp($2)-1 > 0 then ln(exp($1) + exp($2) - 1) when $1 >= -745 and $2 >= -745 and exp($1)+exp($2)-1 <= 0 then NULL when $1 > 0 and $2 < -745 then ln(exp($1)-1) when $1 < -745 and $2 > 0 then ln(exp($2)-1) else NULL end' LANGUAGE SQL;"
)
# Aggregate functions for Automatic Differentiation
self.cursor.execute("DROP AGGREGATE IF EXISTS ior_ad (double precision);")
self.cursor.execute("DROP AGGREGATE IF EXISTS ior_ad (text);")
self.cursor.execute(
"DROP FUNCTION IF EXISTS ior_sfunc_ad (text, double precision);"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION ior_sfunc_ad (text, double precision) returns text AS $$select concat($1, ' a = a*(1 - ', cast($2 AS text), ');')$$ LANGUAGE SQL;"
)
self.cursor.execute("DROP FUNCTION IF EXISTS ior_finalfunc_ad (text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION ior_finalfunc_ad (text) returns text AS $$select concat($1, ' p = 1 - a;')$$ LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE AGGREGATE ior_ad (double precision) (sfunc = ior_sfunc_ad, stype = text, finalfunc = ior_finalfunc_ad, initcond = '');"
)
self.cursor.execute("DROP FUNCTION IF EXISTS ior_sfunc_ad (text, text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION ior_sfunc_ad (text, text) returns text AS $$select concat($1, ' a = a*(1 - ', $2, ');')$$ LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE AGGREGATE ior_ad (text) (sfunc = ior_sfunc_ad, stype = text, finalfunc = ior_finalfunc_ad, initcond = '');"
)
self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_ad (double precision);")
self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_ad (text);")
self.cursor.execute(
"DROP FUNCTION IF EXISTS l_ior_sfunc_ad (text, double precision);"
)
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l_ior_sfunc_ad (text, double precision) returns text AS $$select concat($1, ' p = p + ', cast($2 AS text), ';')$$ LANGUAGE SQL;"
)
self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_finalfunc_ad (text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l_ior_finalfunc_ad (text) returns text AS $$select $1 $$ LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE AGGREGATE l_ior_ad (double precision) (sfunc = l_ior_sfunc_ad, stype = text, finalfunc = l_ior_finalfunc_ad, initcond = '');"
)
self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_sfunc_ad (text, text);")
self.cursor.execute(
"CREATE OR REPLACE FUNCTION l_ior_sfunc_ad (text, text) returns text AS $$select concat($1, ' p = p + ', $2, ';')$$ LANGUAGE SQL;"
)
self.cursor.execute(
"CREATE AGGREGATE l_ior_ad (text) (sfunc = l_ior_sfunc_ad, stype = text, finalfunc = l_ior_finalfunc_ad, initcond = '');"
)
time_total = time() - time_start
getLogger("log").debug(
"%-*s: %.1fs" % (self.pad - 1, "Time - initialize PSQLDB", time_total)
)
def learn_read_file(self, input_file=None):
# ------------------------------------- Read the input file --------------------------------------
time_start = time()
self.initialize_psqldb()
self._scores_correct = []
self._examples = []
self.target_predicate = ""
def read(file):
    """Parse one ProbLog-style input file into the learner's state.

    Recognized line kinds:
      * ``base(pred(type1,type2)).`` -- declares a binary predicate and
        creates a backing SQL table for its facts.
      * ``learn(pred/arity).``       -- declares the target predicate
        (ignored when a target was already set programmatically).
      * ``mode(...)``                -- ignored (modes come from AMIE).
      * anything else                -- a (possibly probabilistic) fact
        ``p::pred(subj,obj).`` inserted into the predicate's table.

    Side effects: populates ``self.predicate_dict``, ``self.constant_id``,
    ``self.constant_dict``, ``self.cw_total``, ``self.weights``,
    ``self._examples`` and ``self._scores_correct``, and issues
    CREATE TABLE / INSERT statements through ``self.cursor``.
    """
    # FIX: use a context manager so the file is closed even on the
    # early `return` error paths (the original leaked the handle there).
    with open(file, "r") as inputf:
        for line in inputf:
            # Pre-processing: strip all spaces, skip blanks and % comments.
            line = line.replace(" ", "")
            if line == "\n":
                continue
            elif line[0] == "%":
                continue
            # Reading Lines
            if line[:5] == "base(":
                # e.g. base(author(person,paper)).
                predicate = line[5:].split("(")[0]
                types = line[5:].split("(")[1].split(")")[-3].split(",")
                arity = len(types)
                if arity != 2:
                    getLogger("log").error(
                        "Arity of Predicate ("
                        + predicate
                        + ") is "
                        + str(arity)
                        + " instead of 2."
                    )
                    return
                # FIX: renamed loop variable so it no longer shadows builtin `type`.
                for type_name in types:
                    if type_name not in self.constant_dict:
                        self.constant_dict[type_name] = {}
                self.predicate_dict[predicate] = types
                self.cw_total[predicate] = 0
                self.weights[predicate] = 0
                # One table per predicate: (v0 integer, v1 integer, p double precision).
                sql_query = "CREATE TABLE IF NOT EXISTS " + predicate + " ("
                i = 0
                while i < arity:
                    sql_query = sql_query + "v" + str(i) + " integer, "
                    i += 1
                sql_query = sql_query + "p double precision);"
                self.cursor.execute(sql_query)
            elif line[:6] == "learn(":
                # e.g. learn(advisedby/2).
                if self.target is not None:
                    # Target already fixed programmatically; ignore file directive.
                    continue
                self.target_predicate = line.split("(")[1].split("/")[0]
                target_arity = int(line.split("/")[1].split(")")[0])
                arguments = [Var("A"), Var("B")]
                self._target = Term(str(self.target_predicate), *arguments)
                # self.hypothesisAscii = 64 + self.targetArity
                self.hypothesis_free_vars = 0
                if target_arity != 2:
                    getLogger("log").error(
                        "Arity of Target Predicate ("
                        + self.target_predicate
                        + ") is "
                        + str(target_arity)
                        + " instead of 2."
                    )
                    return
            elif line[:5] == "mode(":
                # Mode is not required when generating candidates from AMIE
                continue
            else:
                # Read Probabilistic Fact, e.g. 0.8::advisedby(a,b).
                prob = "0"
                predicate = ""
                if "::" in line.split('"')[0]:
                    predicate = line.split("::")[1].split("(")[0]
                    prob = line.split("::")[0]
                    # Clamp probabilities below 1 - tolerance to avoid
                    # degenerate values downstream.
                    if float(prob) > 1 - self.tolerance:
                        # FIX: direct arithmetic instead of eval() on a
                        # constructed string -- identical result, no eval risk.
                        prob = str(1 - self.tolerance)
                else:
                    # Deterministic fact: treat as near-certain probability.
                    prob = str(1 - self.tolerance)  # FIX: was eval("1 - " + ...)
                    predicate = line.split("(")[0]
                self.cw_total[predicate] += 1
                if self.facts_with_quotes:
                    # Constants are quoted strings that may themselves contain
                    # commas or parentheses, so split on the '","' separator.
                    subject = line.split("(")[1].split('","')[0] + '"'
                    # FIX: renamed from `object` to avoid shadowing the builtin.
                    obj = (
                        '"' + "(".join(line.split("(")[1:]).split('","')[1][:-3]
                    )
                else:
                    subject = line.split("(")[1].split(",")[0]
                    obj = line.split(")")[-2].split(",")[1]
                # Intern constants: each distinct constant gets a unique integer
                # id, also remembered per argument type.
                if subject not in self.constant_id:
                    self.constant_id[subject] = self.constant_count
                    self.constant_dict[self.predicate_dict[predicate][0]][
                        subject
                    ] = self.constant_count
                    self.constant_count += 1
                if obj not in self.constant_id:
                    self.constant_id[obj] = self.constant_count
                    self.constant_dict[self.predicate_dict[predicate][1]][
                        obj
                    ] = self.constant_count
                    self.constant_count += 1
                subject_index = self.constant_id[subject]
                object_index = self.constant_id[obj]
                self.cursor.execute(
                    "INSERT INTO "
                    + predicate
                    + " VALUES ("
                    + str(subject_index)
                    + ", "
                    + str(object_index)
                    + ", "
                    + prob
                    + ");"
                )
                if predicate == self.target_predicate:
                    # Facts over the target predicate double as training examples.
                    args = [subject, obj]
                    prob = float(prob)
                    if args in self.examples:
                        # Duplicate example: combine probabilities with noisy-or.
                        old_prob = self._scores_correct[self.examples.index(args)]
                        new_prob = prob + old_prob - prob * old_prob
                        self._scores_correct[self.examples.index(args)] = new_prob
                    else:
                        self._examples.append(args)
                        self._scores_correct.append(prob)
if self.target is not None:
target_arity = self._target._Term__arity
self.target_predicate = self._target._Term__functor
# self.hypothesisAscii = 64 + self.targetArity
self.hypothesis_free_vars = 0
if target_arity != 2:
getLogger("log").error(
"Arity of Target Predicate ("
+ self.target_predicate
+ ") is "
+ str(target_arity)
+ " instead of 2."
)
return
if input_file is None:
read(self.input_file)
else:
read(input_file)
self.total_examples = len(self.examples)
getLogger("log").info(
"%-*s: %d" % (self.pad, "Number of examples (M)", self.total_examples)
)
getLogger("log").info(
"%-*s: %.4f"
% (self.pad, "Positive probabilistic part (P)", sum(self._scores_correct))
)
getLogger("log").info(
"%-*s: %.4f"
% (
self.pad,
"Negative probabilistic part (N)",
self.total_examples - sum(self._scores_correct),
)
)
self.predicate_list = list(self.predicate_dict.keys())
self.predicate_list.remove(self.target_predicate)
self.weights.pop(self.target_predicate, None)
self.time_read = | |
"))
statusLabel.setForeground(Color.RED)
myPrint("B", "'FIX: Investment Security Txns with Invalid Parent Accounts' - You have %s errors to manually first first!" %(iCountUnfixable))
myPopupInformationBox(jif,"You have %s errors to manually first first!" %(iCountUnfixable), "FIX: Investment Security Txns with Invalid Parent Accounts",JOptionPane.ERROR_MESSAGE)
return
if not confirm_backup_confirm_disclaimer(jif,statusLabel,"FIX %s SECURITY TXNS INVALID PARENT ACCTS" %(iCountErrors),"FIX %s Security Txns with Invalid Parent Accts?" %(iCountErrors)):
return
jif.dispose()
myPrint("B", "User accepted disclaimer to FIX Investment Security Txns with Invalid Parent Accounts. Proceeding.....")
output += "\n\nRUNNING FIX ON SECURITY TXNS TO RE-LINK PARENT ACCOUNTS\n" \
"------------------------------------------------------------\n\n"
moneydance_data.setRecalcBalances(False)
moneydance_ui.setSuspendRefresh(True)
x, iCountErrors, iCountUnfixable, iErrorsFixed = review_security_accounts(FIX_MODE=True)
moneydance_ui.getMain().saveCurrentAccount()
moneydance_data.setRecalcBalances(True)
moneydance_ui.setSuspendRefresh(False) # This does this too: book.notifyAccountModified(root)
output += x
output += "\n\nYou had %s errors, with %s needing manual fixes first... I HAVE FIXED %s\n\n" %(iCountErrors, iCountUnfixable, iErrorsFixed)
output += "\n<END>"
myPrint("B", "FIXED %s Investment Security Txns with Invalid Parent Accounts" %(iErrorsFixed))
play_the_money_sound()
statusLabel.setText(("FIXED %s Investment Security Txns with Invalid Parent Accounts" %(iErrorsFixed)).ljust(800, " "))
statusLabel.setForeground(DARK_GREEN)
jif=QuickJFrame("VIEW Investment Security Txns with Invalid Parent Accounts".upper(), output).show_the_frame()
myPopupInformationBox(jif,"FIXED %s Investment Security Txns with Invalid Parent Accounts" %(iErrorsFixed), "FIX Investment Security Txns with Invalid Parent Accounts", JOptionPane.WARNING_MESSAGE)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
def fix_delete_one_sided_txns(statusLabel):
    """Find, report and (after confirmation) delete one-sided transactions.

    One-sided transactions (no "other side" / zero other-txn count) are
    usually left over from Quicken imports. The user is shown the list,
    must pass the backup/disclaimer confirmation, and only then are the
    transactions deleted from the current Moneydance dataset.

    statusLabel: Swing JLabel used to report the outcome to the user.
    """
    global toolbox_frame_, debug

    # delete_invalid_txns.py
    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
    myPrint("B", "Script running to analyse whether you have any one sided transactions - usually from Quicken Imports....")
    myPrint("P", "--------------------------------------------------------------------------------------------------------")

    book = moneydance.getCurrentAccountBook()
    txnSet = book.getTransactionSet()
    txns = txnSet.iterableTxns()

    output = ""
    toDelete = []
    output += "\nLIST OF ONE SIDED TRANSACTIONS (usually from Quicken Imports)\n"
    output += "-------------------------------------------------------------\n"

    # A transaction with no other legs at all is structurally invalid.
    for txn in txns:
        if txn.getOtherTxnCount() == 0:
            output += pad(str(txn.getUUID()), 50) + " "
            output += "Date: " + pad(str(txn.getDateInt()), 15) + " "
            output += pad(str(txn.getAccount()), 25) + " "
            output += pad(str(txn.getAccount().getAccountType()), 25) + " "
            output += pad(str(txn.getTransferType()), 15) + " "
            output += rpad(str(txn.getValue()), 12) + " "
            output += "\n"
            toDelete.append(txn)

    if not toDelete:  # FIX: idiomatic emptiness test (was: not len(toDelete)>0)
        myPrint("J", "Congratulations - You have no one-sided transactions to delete!!")
        statusLabel.setText(("Congratulations - You have no one-sided transactions to delete!!").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_, "Congratulations - You have no one-sided transactions to delete!!", "DELETE ONE-SIDE TXNS", JOptionPane.INFORMATION_MESSAGE)
        return

    output += "\n<END>"
    jif = QuickJFrame("LIST OF ONE SIDED TRANSACTIONS (usually from Quicken Imports)", output).show_the_frame()
    myPrint("J", "You have %s one-sided transactions that can be deleted!!" % len(toDelete))
    # FIX: corrected typo in popup text ("can de deleted" -> "can be deleted").
    myPopupInformationBox(jif, "You have %s one-sided transactions that can be deleted!!" % len(toDelete), "DELETE ONE-SIDE TXNS", JOptionPane.WARNING_MESSAGE)

    # Destructive step requires explicit backup + disclaimer confirmation.
    if not confirm_backup_confirm_disclaimer(jif, statusLabel, "DELETE ONE-SIDED TRANSACTIONS", "delete %s one-sided transactions?" % (len(toDelete))):
        return

    # Suspend balance recalculation / UI refresh while bulk-deleting.
    moneydance_data.setRecalcBalances(False)
    moneydance_ui.setSuspendRefresh(True)

    for t in toDelete:
        myPrint("J", "Item %s deleted" % t.getUUID())
        t.deleteItem()

    moneydance_ui.getMain().saveCurrentAccount()
    moneydance_data.setRecalcBalances(True)
    moneydance_ui.setSuspendRefresh(False)  # This does this too: book.notifyAccountModified(root)

    myPrint("B", "Deleted %s invalid one-sided transactions" % len(toDelete))
    play_the_money_sound()
    statusLabel.setText(("%s One-Sided Transactions DELETED!" % len(toDelete)).ljust(800, " "))
    statusLabel.setForeground(DARK_GREEN)
    myPopupInformationBox(jif, "Congratulations - All One Sided Transactions DELETED!!", "DELETE ONE-SIDE TXNS", JOptionPane.WARNING_MESSAGE)
    myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
    return
def convert_stock_avg_cst_control(statusLabel):
    """Convert a user-selected LOT-controlled security to Average Cost Control.

    Flow: warn the user, let them pick a security, count the sale
    transactions carrying a "cost_basis" LOT tag, require backup/disclaimer
    confirmation, then wipe those tags and flip the security to average
    cost control (syncing each modified item).

    statusLabel: Swing JLabel used to report the outcome to the user.
    """
    global toolbox_frame_, debug

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")

    if moneydance_data is None: return

    # Up-front warning: this resets/wipes LOT data.
    if not myPopupAskQuestion(toolbox_frame_,"CONVERT ACCT/STOCK TO Avg Cst Ctrl","Do you want to convert a stock to Average Cost Control and reset/wipe any LOT data?",theMessageType=JOptionPane.WARNING_MESSAGE):
        myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    # MyAcctFilter(14): presumably selects LOT-controlled securities -- confirm
    # against the filter's definition elsewhere in this file.
    accountsList = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(14))
    accountsList = sorted(accountsList, key=lambda sort_x: (sort_x.getFullAccountName().upper()))

    accountSec = JOptionPane.showInputDialog(toolbox_frame_,
                                             "Select a LOT Controlled Acct/Stock to convert to Avg Cost Control",
                                             "CONVERT ACCT/STOCK TO Avg Cst Ctrl",
                                             JOptionPane.INFORMATION_MESSAGE,
                                             moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"),
                                             accountsList,
                                             None)

    if not accountSec:
        statusLabel.setText(("CONVERT ACCT/STOCK TO Avg Cst Ctrl - No Account/Security was selected - no changes made..").ljust(800, " "))
        statusLabel.setForeground(Color.BLUE)
        myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
        return

    # Lightweight snapshot wrapper around a security account and its txns.
    class SecurityObj:
        def __init__(self,Obj,Book): # noqa
            self.Obj = Obj
            self.Acct = Obj.getParentAccount()
            self.TxnSet = Book.getTransactionSet().getTransactionsForAccount(Obj)
            self.Name = Obj.getAccountName()
            self.Num = Obj.getAccountNum()
            self.Type = "SECURITY"
            self.AvgCost = Obj.getUsesAverageCost()
            self.Txns = []
            for _Txn in self.TxnSet: self.Txns.append(TxnObj(_Txn))

    # Snapshot wrapper for a single transaction.
    class TxnObj:
        def __init__(self,Txn): # noqa
            self.Obj = Txn
            self.Parent = Txn.getParentTxn()
            self.ID = Txn.getUUID()
            self.DateInt = Txn.getDateInt()
            self.Type = self.Parent.getInvestTxnType().getIDString()
            # Preserve the original LOT tag value (captured before any wipe).
            self.saveCostBasisState = self.Obj.getParameter("cost_basis",None)

    Book = moneydance.getCurrentAccountBook()

    # We are forcing just the one selected Security into the List (the original script allowed user to hard code several)
    Securities = [SecurityObj(accountSec,Book)]

    # First pass (read-only): count sale txns that carry a LOT tag.
    iErrors=0
    for Security in Securities:
        for Txn in Security.Txns:
            if (InvestUtil.isSaleTransaction(Txn.Parent.getInvestTxnType())
                    and (Txn.Obj.getParameter("cost_basis", None) is not None)):
                iErrors+=1

    if not confirm_backup_confirm_disclaimer(toolbox_frame_,statusLabel,"CONVERT ACCT/STOCK TO Avg Cst Ctrl","Convert %s to Avg Cst Control and wipe %s LOT records?" %(accountSec,iErrors)):
        return

    # Second pass (destructive): wipe the LOT tags, then flip the control
    # mode and sync each modified object.
    listWiped=""
    for Security in Securities:
        myPrint("B","@@ User requested to convert Acct/Security %s to Average Lot Control and wipe %s LOT records... EXECUTING NOW" %(Security.Obj, iErrors))
        for Txn in Security.Txns:
            if (InvestUtil.isSaleTransaction(Txn.Parent.getInvestTxnType())
                    and (Txn.Obj.getParameter("cost_basis", None) is not None)):
                listWiped+=" %s Wiped LOT tag on record (was: %s)\n" %(Security.Obj, Txn.Obj.getParameter("cost_basis", None))
                myPrint("B","@@ Security %s Wiping LOT record on %s" %(Security.Obj, Txn.Obj))
                Txn.Obj.setParameter("cost_basis", None)
                Txn.Obj.syncItem()
        Security.Obj.setUsesAverageCost(True)
        Security.AvgCost = True
        Security.Obj.syncItem()

    myPrint("B", "CONVERT ACCT/STOCK TO Avg Cst Ctrl - Security %s Changed to Average Cost Control (and %s LOT records wiped)"%(accountSec,iErrors))
    statusLabel.setText(("CONVERT ACCT/STOCK TO Avg Cst Ctrl - Security %s Changed to Average Cost Control (and %s LOT records wiped)"%(accountSec,iErrors)).ljust(800, " "))
    statusLabel.setForeground(Color.RED)
    play_the_money_sound()

    MyPopUpDialogBox(toolbox_frame_,
                     theStatus="Security %s converted to Average Cost Control (I wiped %s LOT records - shown below)" %(accountSec,iErrors),
                     theMessage="%s" %(listWiped),
                     theWidth=200,
                     theTitle="CONVERT ACCT/STOCK TO Avg Cst Ctrl",
                     lAlertLevel=1).go()

    myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
    return
def convert_stock_lot_FIFO(statusLabel):
global toolbox_frame_, debug
# MakeFifoCost.py (author unknown)
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
if moneydance_data is None: return
if not myPopupAskQuestion(toolbox_frame_,"CONVERT ACCT/STOCK TO LOT/FIFO","Do you want to attempt to convert a stock to LOT Controlled and match Sells to Buys using FiFo?",theMessageType=JOptionPane.WARNING_MESSAGE):
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
accountsList = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(13))
accountsList = sorted(accountsList, key=lambda sort_x: (sort_x.getFullAccountName().upper()))
accountSec = JOptionPane.showInputDialog(toolbox_frame_,
"Select an Avg Cost Controlled Acct/Stock to convert to LOT/FiFo",
"CONVERT STOCK FIFO",
JOptionPane.INFORMATION_MESSAGE,
moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"),
accountsList,
None)
if not accountSec:
statusLabel.setText(("CONVERT STOCK FIFO - No Account/Security was selected - no changes made..").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
# noinspection PyUnresolvedReferences
if len(accountSec.getCurrencyType().getSplits()) >0:
# noinspection PyUnresolvedReferences
statusLabel.setText(("CONVERT STOCK FIFO - SORRY - You have %s split(s) on this security %s. I have not been programmed to deal with these - contact the author...." %(len(accountSec.getCurrencyType().getSplits()),accountSec)).ljust(800, " "))
statusLabel.setForeground(Color.RED)
# noinspection PyUnresolvedReferences
myPopupInformationBox(toolbox_frame_,
"SORRY - You have %s split(s) on this security %s. I have not been programmed to deal with these - contact the author...." %(len(accountSec.getCurrencyType().getSplits()),accountSec),
"CONVERT STOCK FIFO",
JOptionPane.ERROR_MESSAGE)
return
MyPopUpDialogBox(toolbox_frame_,
theStatus="Information before you proceed: %s" %(accountSec),
theMessage="This function updates the Acct/Security records as it progresses to generate the report\n"
"There is no pre-report for you to validate/confirm\n"
"1. It will ask you to confirm I can wipe any existing LOT tags incorrectly set first (I will save these)\n"
"2. The report will run, Convert to LOT Control, update the LOT records, and show you the results\n"
"3. If you are not happy, I can reset the Security back to Avg Cost Control (removing/resetting LOT tags)\n"
"4. I will restore wiped (incorrect) LOT tags back to the saved data from step 1.\n"
"** You will be asked to confirm and perform a backup then proceed in the next step....",
theWidth=200,
theTitle="CONVERT STOCK FIFO",
OKButtonText="I HAVE READ THIS",
lAlertLevel=1).go()
if not confirm_backup_confirm_disclaimer(toolbox_frame_,statusLabel,"CONVERT STOCK FIFO","Convert %s to LOT control and assign FiFio?" %(accountSec)):
return
class SecurityObj:
def __init__(self,Obj,Book): # noqa
self.Obj = Obj
self.Acct = Obj.getParentAccount()
self.TxnSet = Book.getTransactionSet().getTransactionsForAccount(Obj)
self.Name = Obj.getAccountName()
self.Num = Obj.getAccountNum()
self.Type = "SECURITY"
self.Balance = Obj.getBalance()
self.CurTyp = Obj.getCurrencyType()
self.AvgCost = Obj.getUsesAverageCost()
self.Txns = []
for _Txn in self.TxnSet:
self.Txns.append(TxnObj(_Txn))
self.Txns.sort(key=lambda l: l.Date)
class TxnObj:
def __init__(self,Txn): # noqa
self.Obj = Txn
self.Parent = Txn.getParentTxn()
self.ID = Txn.getUUID()
self.DateInt = Txn.getDateInt()
self.Type = self.Parent.getInvestTxnType().getIDString()
# noinspection PyUnresolvedReferences
self.Date = datetime.datetime.fromtimestamp(DateUtil.convertIntDateToLong(Txn.getDateInt()).time/1e3)
self.LngShrs = Txn.getValue()
securityAcct = Txn.getAccount()
securityCurr = securityAcct.getCurrencyType()
self.Shares = securityCurr.getDoubleValue(Txn.getValue())
self.saveCostBasisState = self.Obj.getParameter("cost_basis",None)
def MakeCostsFifo(Security,Book, INCLUDE_THE_ZEROS): # noqa
WrngCnt = 0 # noqa
textLog = ""
if not Security.AvgCost:
statusLabel.setText(("CONVERT STOCK FIFO - ERROR - Security is already using LOT control - LOGIC ERROR - ABORTING!").ljust(800, " "))
statusLabel.setForeground(Color.RED)
return
else:
textLog+=("Setting the Security '{}:{}' to FIFO lot matching.\n\n".format(Security.Acct.getAccountName(),Security.Name))
# If you don't do this here, then InvestUtil.getRemainingLots() returns None
Security.Obj.setUsesAverageCost(False)
Security.AvgCost = False
Security.Obj.syncItem()
for Txn in Security.Txns: # noqa
if (InvestUtil.isSaleTransaction(Txn.Parent.getInvestTxnType())
and (Txn.LngShrs != 0 or INCLUDE_THE_ZEROS)):
RLots = InvestUtil.getRemainingLots(Book,Security.Obj,Txn.Obj.getDateInt())
ShrsLeft = -(Txn.LngShrs)
Buys = ""
prettyBuys = ""
for | |
(FilterType, len(PAINSFiltersMap["ID"][FilterType])))
def ProcessOptions():
    """Process and validate command line arguments and options"""
    MiscUtil.PrintInfo("Processing options...")

    # Make sure all option values are acceptable before consuming them...
    ValidateOptions()

    def _IsYes(OptionValue):
        # Case-insensitive yes/no flags map to booleans.
        return re.match("^yes$", OptionValue, re.I) is not None

    OptionsInfo["Infile"] = Options["--infile"]
    OptionsInfo["InfileParams"] = MiscUtil.ProcessOptionInfileParameters("--infileParams", Options["--infileParams"], Options["--infile"])

    OptionsInfo["Outfile"] = Options["--outfile"]
    OptionsInfo["OutfileParams"] = MiscUtil.ProcessOptionOutfileParameters("--outfileParams", Options["--outfileParams"], Options["--infile"], Options["--outfile"])

    # Derive the name of the optional file holding filtered molecules from
    # the outfile name: <OutfileRoot>_Filtered.<OutfileExt>
    OutfileDir, OutfileRoot, OutfileExt = MiscUtil.ParseFileName(Options["--outfile"])
    OptionsInfo["OutfileFiltered"] = "%s_Filtered.%s" % (OutfileRoot, OutfileExt)
    OptionsInfo["OutfileFilteredMode"] = _IsYes(Options["--outfileFiltered"])

    OptionsInfo["Overwrite"] = Options["--overwrite"]

    OptionsInfo["CountMode"] = re.match("^count$", Options["--mode"], re.I) is not None
    OptionsInfo["NegateMatch"] = _IsYes(Options["--negate"])

    OptionsInfo["MPMode"] = _IsYes(Options["--mp"])
    OptionsInfo["MPParams"] = MiscUtil.ProcessOptionMultiprocessingParameters("--mpParams", Options["--mpParams"])

    OptionsInfo["PAINSMode"] = Options["--painsMode"]
    ProcessPAINSMode()
def RetrieveOptions():
    """Retrieve command line arguments and options"""
    global Options

    # Parse the command line against the docopt usage specification...
    Options = docopt(_docoptUsage_)

    # Honor any explicitly requested working directory...
    SpecifiedWorkingDir = Options["--workingdir"]
    if SpecifiedWorkingDir:
        os.chdir(SpecifiedWorkingDir)

    # Print usage examples and exit when requested...
    if Options.get("--examples"):
        MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))
        sys.exit(0)
def ValidateOptions():
    """Validate option values"""
    Infile = Options["--infile"]
    Outfile = Options["--outfile"]

    # Input file must exist and use a supported extension...
    MiscUtil.ValidateOptionFilePath("-i, --infile", Infile)
    MiscUtil.ValidateOptionFileExt("-i, --infile", Infile, "sdf sd smi txt csv tsv")

    # Output file checks: extension, overwrite policy, distinct from input...
    MiscUtil.ValidateOptionFileExt("-o, --outfile", Outfile, "sdf sd smi")
    MiscUtil.ValidateOptionsOutputFileOverwrite("-o, --outfile", Outfile, "--overwrite", Options["--overwrite"])
    MiscUtil.ValidateOptionsDistinctFileNames("-i, --infile", Infile, "-o, --outfile", Outfile)

    MiscUtil.ValidateOptionTextValue("--outfileFiltered", Options["--outfileFiltered"], "yes no")

    MiscUtil.ValidateOptionTextValue("-m, --mode", Options["--mode"], "filter count")
    if re.match("^filter$", Options["--mode"], re.I) and not Outfile:
        MiscUtil.PrintError("The outfile must be specified using \"-o, --outfile\" during \"filter\" value of \"-m, --mode\" option")

    MiscUtil.ValidateOptionTextValue("--mp", Options["--mp"], "yes no")
    MiscUtil.ValidateOptionTextValue("-n, --negate", Options["--negate"], "yes no")
# Setup a usage string for docopt...
_docoptUsage_ = """
RDKitFilterPAINS.py - Filter PAINS molecules
Usage:
RDKitFilterPAINS.py [--infileParams <Name,Value,...>] [--mode <filter or count>]
[--mp <yes or no>] [--mpParams <Name.Value,...>]
[--outfileFiltered <yes or no>] [ --outfileParams <Name,Value,...> ]
[--painsMode <All or A, B, C>] [--negate <yes or no>]
[--overwrite] [-w <dir>] -i <infile> -o <outfile>
RDKitFilterPAINS.py -h | --help | -e | --examples
Description:
Filter Pan-assay Interference molecules (PAINS) [ Ref 130 - 131 ] from an input
file by performing a substructure search using SMARTS pattern specified in
MAYACHEMTOOLS/lib/data/PAINSFilters.csv file and write out appropriate
molecules to an output file or simply count the number of filtered molecules.
The supported input file formats are: SD (.sdf, .sd), SMILES (.smi, .csv,
.tsv, .txt)
The supported output file formats are: SD (.sdf, .sd), SMILES (.smi)
Options:
-e, --examples
Print examples.
-h, --help
Print this help message.
-i, --infile <infile>
Input file name.
--infileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for reading
molecules from files. The supported parameter names for different file
formats, along with their default values, are shown below:
SD: removeHydrogens,yes,sanitize,yes,strictParsing,yes
SMILES: smilesColumn,1,smilesNameColumn,2,smilesDelimiter,space,
smilesTitleLine,auto,sanitize,yes
Possible values for smilesDelimiter: space, comma or tab.
-m, --mode <filter or count> [default: filter]
Specify whether to filter the matched molecules and write out the rest of the
molecules to an outfile or simply count the number of matched molecules
marked for filtering.
--mp <yes or no> [default: no]
Use multiprocessing.
By default, input data is retrieved in a lazy manner via mp.Pool.imap()
function employing lazy RDKit data iterable. This allows processing of
arbitrary large data sets without any additional requirements memory.
All input data may be optionally loaded into memory by mp.Pool.map()
before starting worker processes in a process pool by setting the value
of 'inputDataMode' to 'InMemory' in '--mpParams' option.
A word to the wise: The default 'chunkSize' value of 1 during 'Lazy' input
data mode may adversely impact the performance. The '--mpParams' section
provides additional information to tune the value of 'chunkSize'.
--mpParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for to
configure multiprocessing.
The supported parameter names along with their default and possible
values are shown below:
chunkSize, auto
inputDataMode, Lazy [ Possible values: InMemory or Lazy ]
numProcesses, auto [ Default: mp.cpu_count() ]
These parameters are used by the following functions to configure and
control the behavior of multiprocessing: mp.Pool(), mp.Pool.map(), and
mp.Pool.imap().
The chunkSize determines chunks of input data passed to each worker
process in a process pool by mp.Pool.map() and mp.Pool.imap() functions.
The default value of chunkSize is dependent on the value of 'inputDataMode'.
The mp.Pool.map() function, invoked during 'InMemory' input data mode,
automatically converts RDKit data iterable into a list, loads all data into
memory, and calculates the default chunkSize using the following method
as shown in its code:
chunkSize, extra = divmod(len(dataIterable), len(numProcesses) * 4)
if extra: chunkSize += 1
For example, the default chunkSize will be 7 for a pool of 4 worker processes
and 100 data items.
The mp.Pool.imap() function, invoked during 'Lazy' input data mode, employs
'lazy' RDKit data iterable to retrieve data as needed, without loading all the
data into memory. Consequently, the size of input data is not known a priori.
It's not possible to estimate an optimal value for the chunkSize. The default
chunkSize is set to 1.
The default value for the chunkSize during 'Lazy' data mode may adversely
impact the performance due to the overhead associated with exchanging
small chunks of data. It is generally a good idea to explicitly set chunkSize to
a larger value during 'Lazy' input data mode, based on the size of your input
data and number of processes in the process pool.
The mp.Pool.map() function waits for all worker processes to process all
the data and return the results. The mp.Pool.imap() function, however,
returns the the results obtained from worker processes as soon as the
results become available for specified chunks of data.
The order of data in the results returned by both mp.Pool.map() and
mp.Pool.imap() functions always corresponds to the input data.
-n, --negate <yes or no> [default: no]
Specify whether to filter molecules not matching the PAINS filters specified by
SMARTS patterns.
-o, --outfile <outfile>
Output file name.
--outfileFiltered <yes or no> [default: no]
Write out a file containing filtered molecules. Its name is automatically
generated from the specified output file. Default: <OutfileRoot>_
Filtered.<OutfileExt>.
--outfileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for writing
molecules to files. The supported parameter names for different file
formats, along with their default values, are shown below:
SD: compute2DCoords,auto,kekulize,no
SMILES: kekulize,no,smilesDelimiter,space, smilesIsomeric,yes,
smilesTitleLine,yes,smilesMolName,yes,smilesMolProps,no
Default value for compute2DCoords: yes for SMILES input file; no for all other
file types.
--overwrite
Overwrite existing files.
-p, --painsMode <All or A, B, or C> [default: All]
All or a comma delimited list of PAINS filter family type to used for
filtering molecules.
-w, --workingdir <dir>
Location of working directory which defaults to the current directory.
Examples:
To count the number of molecules not containing any substructure corresponding to
PAINS SMARTS patterns and write out a SMILES file, type:
% RDKitFilterPAINS.py -i Sample.smi -o SampleOut.smi
To count the number of molecules not containing any substructure corresponding to
PAINS SMARTS patterns, perform filtering in multiprocessing mode on all available
CPUs without loading all data into memory, and write out a SMILES file, type:
% RDKitFilterPAINS.py --mp yes -i Sample.smi -o SampleOut.smi
To count the number of molecules not containing any substructure corresponding to
PAINS SMARTS patterns, perform filtering in multiprocessing mode on all available
CPUs by loading all data into memory, and write out a SMILES file, type:
% RDKitFilterPAINS.py --mp yes --mpParams "inputDataMode,InMemory"
-i Sample.smi -o SampleOut.smi
To count the number of molecules not containing any substructure corresponding to
PAINS SMARTS patterns, perform filtering in multiprocessing mode on specific
number of CPUs and chunk size without loading all data into memory, and
write out a SMILES file, type:
% RDKitFilterPAINS.py --mp yes --mpParams "inputDataMode,Lazy,
numProcesses,4,chunkSize,8" -i Sample.smi -o SampleOut.smi
To count the number of molecules not containing any substructure corresponding to
PAINS SMARTS patterns and write out a SMILES file containing these and filtered
molecules, type:
% RDKitFilterPAINS.py | |
<filename>midea_beautiful/cloud.py
"""Interface to Midea cloud API."""
from __future__ import annotations
import base64
from datetime import datetime
import json
import logging
from threading import RLock
from time import sleep, time
from typing import Any, Final, Tuple
from secrets import token_hex, token_urlsafe
import requests
from requests.exceptions import RequestException
from midea_beautiful.crypto import Security
from midea_beautiful.exceptions import (
AuthenticationError,
CloudAuthenticationError,
CloudError,
CloudRequestError,
MideaError,
ProtocolError,
RetryLaterError,
)
from midea_beautiful.midea import (
DEFAULT_API_SERVER_URL,
DEFAULT_APP_ID,
DEFAULT_APPKEY,
DEFAULT_HMACKEY,
DEFAULT_IOTKEY,
DEFAULT_PROXIED,
DEFAULT_SIGNKEY,
)
from midea_beautiful.util import Redacted, is_very_verbose, sensitive
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# spell-checker: ignore iampwd mdata

# Module level logger.
_LOGGER = logging.getLogger(__name__)

# Constant values included in every cloud API request payload.
CLOUD_API_CLIENT_TYPE: Final = 1  # Android
CLOUD_API_FORMAT: Final = 2  # JSON
CLOUD_API_LANGUAGE: Final = "en_US"

# Endpoints that carry credential material in the request payload
# (presumably excluded from verbose request logging -- confirm at call sites).
PROTECTED_REQUESTS: Final = ["/v1/user/login/id/get", "/v1/user/login"]
# Endpoints whose responses carry tokens/credentials.
PROTECTED_RESPONSES: Final = [
    "/v1/iot/secure/getToken",
    "/v1/user/login/id/get",
    "/v1/user/login",
    "/mj/user/login",
]

# Default number of retries for cloud API requests (see MideaCloud.max_retries).
_MAX_RETRIES: Final = 3
# Default request timeout in seconds (see MideaCloud.request_timeout).
_DEFAULT_CLOUD_TIMEOUT: Final = 9

# Redaction configuration; presumably consumed by midea_beautiful.util.Redacted
# with `length` controlling how much of the value stays visible -- confirm there.
_REDACTED_KEYS: Final = {"id": {"length": 4}, "sn": {"length": 8}}
_REDACTED_REQUEST: Final = {"sessionId": {}}

# App/system version strings reported when using the proxied API variant.
_PROXIED_APP_VERSION: Final = "2.22.0"
_PROXIED_SYS_VERSION: Final = "8.1.0"
def _encode_as_csv(data: bytes | bytearray) -> str:
normalized = []
for byt in data:
if byt >= 128:
byt = byt - 256
normalized.append(str(byt))
string = ",".join(normalized)
return string
def _decode_from_csv(data: str) -> bytes:
int_data = [int(a) for a in data.split(",")]
for i, value in enumerate(int_data):
if value < 0:
int_data[i] = value + 256
return bytes(int_data)
class MideaCloud:
    """Client API for Midea cloud.

    Serializes requests across threads via an internal lock and retries
    failed API calls up to ``max_retries`` times.
    """

    # Default sleep unit of time. By default 1 second.
    _DEFAULT_SLEEP_INTERVAL: Final = 1

    # Unit of time for sleep.
    # Can be set to different value during tests.
    sleep_interval: float = _DEFAULT_SLEEP_INTERVAL
def __init__(
    self,
    appkey: str | None,
    account: str,
    password: str,
    appid: int | str | None = DEFAULT_APP_ID,
    api_url: str = DEFAULT_API_SERVER_URL,
    sign_key: str = DEFAULT_SIGNKEY,
    iot_key: str = DEFAULT_IOTKEY,
    hmac_key: str = DEFAULT_HMACKEY,
    proxied: str = DEFAULT_PROXIED,
) -> None:
    """Create a Midea cloud API client.

    Args:
        appkey: app key from a Midea based mobile app
            (``DEFAULT_APPKEY`` when falsy).
        account: email address of the Midea account.
        password: password for the Midea account.
        appid: application id matching the app key.
        api_url: base URL of the cloud API server.
        sign_key: request signing key (``DEFAULT_SIGNKEY`` when falsy).
        iot_key: IoT key, also used for proxied basic authentication.
        hmac_key: HMAC key passed to the security helper.
        proxied: proxied-app mode value (``DEFAULT_PROXIED`` when falsy).
    """
    # Get this from any of the Midea based apps
    self._appkey = appkey or DEFAULT_APPKEY
    self._appid = int(appid or DEFAULT_APP_ID)
    self._sign_key = sign_key or DEFAULT_SIGNKEY
    self._iot_key = iot_key or DEFAULT_IOTKEY
    self._hmac_key = hmac_key or DEFAULT_HMACKEY
    self._proxied = proxied or DEFAULT_PROXIED
    # Random token identifying this client instance to the push service
    self._pushtoken = token_urlsafe(120)
    # Your email address for your Midea account
    self._account = account
    self._password = password
    # Server URL
    self._api_url = api_url
    self._security = Security(
        appkey=self._appkey,
        signkey=self._sign_key,
        iotkey=self._iot_key,
        hmackey=self._hmac_key,
    )
    # HTTP basic-auth header value for proxied requests; registered as
    # sensitive so it is redacted from logs.
    basic = base64.b64encode(
        f"{self._appkey}:{self._iot_key}".encode("ascii")
    ).decode("ascii")
    sensitive(basic)
    self._proxied_auth = f"Basic {basic}"
    # Unique user ID that is separate to the email address
    self._login_id: str = ""
    # FIX: _country_code was initialized twice (first to None, then to ""),
    # with conflicting types; keep the single typed initialization.
    self._country_code: str = ""
    self._id_adapt: str = ""
    self._mas_url: str = ""
    self._sse_url: str = ""
    # A session dictionary that holds the login information of
    # the current user
    self._session: dict = {}
    self._uid: str = ""
    self._header_access_token = ""
    # Allow for multiple threads to initiate requests
    self._api_lock = RLock()
    # Count the number of retries for API requests
    self.max_retries = _MAX_RETRIES
    self._retries = 0
    self.request_timeout: float = _DEFAULT_CLOUD_TIMEOUT
    # A list of appliances associated with the account
    self._appliance_list: list[dict[str, str]] = []
    def api_request(
        self,
        endpoint: str,
        args: dict[str, Any] | None = None,
        authenticate: bool = True,
        key: str | None = None,
        data: dict | None = None,
        req_id: str | None = None,
        instant: str | None = None,
    ) -> Any:
        """
        Sends an API request to the Midea cloud service and returns the
        results or raises ValueError if there is an error

        Args:
            endpoint (str): endpoint on the API server
            args (dict[str, Any]): arguments for API request
            authenticate (bool, optional): should we first attempt to
            authenticate before sending request. Defaults to True.
            key: json key holding the payload in the reply; defaults to
            "data" (proxied) or "result" (non-proxied).
            data: pre-built request body; a default one is built when None.
            req_id: request id for the proxied API (random when None).
            instant: timestamp used in proxied signing (now when None).

        Raises:
            CloudRequestError: If an HTTP error occurs
            RecursionError: If there were too many retries

        Returns:
            dict: value of result key in json response
        """
        args = args or {}
        # Serialize all API traffic; RLock lets retries re-enter safely.
        with self._api_lock:
            payload = {}
            try:
                if authenticate:
                    self.authenticate()
                # Login result is cached on the instance - never log in twice.
                if endpoint == "user/login" and self._session and self._login_id:
                    return self._session
                # Set up the initial data payload with the global variable set
                if data is None:
                    data = {
                        "appId": self._appid,
                        "format": CLOUD_API_FORMAT,
                        "clientType": CLOUD_API_CLIENT_TYPE,
                        "language": CLOUD_API_LANGUAGE,
                        "src": self._appid,
                        "stamp": datetime.now().strftime("%Y%m%d%H%M%S"),
                    }
                # Add the method parameters for the endpoint
                data.update(args)
                headers = {}
                # Add the sessionId if there is a valid session
                if self._session:
                    if not self._proxied:
                        data["sessionId"] = self._session["sessionId"]
                    else:
                        # Proxied API authenticates via headers, not body.
                        headers["uid"] = self._uid
                        headers["accessToken"] = self._header_access_token
                url = self._api_url + endpoint
                if self._proxied:
                    error_code_tag = "code"
                    key = key if key is not None else "data"
                    # Fill in the app metadata unless the caller supplied
                    # a fully formed request (detected via reqId).
                    if not data.get("reqId"):
                        data.update(
                            {
                                "appVNum": _PROXIED_APP_VERSION,
                                "appVersion": _PROXIED_APP_VERSION,
                                "clientVersion": _PROXIED_APP_VERSION,
                                "platformId": "1",
                                "reqId": req_id or token_hex(16),
                                "retryCount": "3",
                                "uid": self._uid or "",
                                "userType": "0",
                            }
                        )
                    # Proxied requests are signed over the serialized json.
                    send_payload = json.dumps(data)
                    instant = instant or str(int(time()))
                    sign = self._security.sign_proxied(
                        None, data=send_payload, random=instant
                    )
                    headers.update(
                        {
                            "x-recipe-app": str(self._appid),
                            "Authorization": self._proxied_auth,
                            "sign": sign,
                            "secretVersion": "1",
                            "random": instant,
                            "version": _PROXIED_APP_VERSION,
                            "systemVersion": _PROXIED_SYS_VERSION,
                            "platform": "0",
                            "Accept-Encoding": "identity",
                            "Content-Type": "application/json",
                        }
                    )
                    if self._uid:
                        headers["uid"] = self._uid
                    if self._header_access_token:
                        headers["accessToken"] = self._header_access_token
                else:
                    error_code_tag = "errorCode"
                    key = key if key is not None else "result"
                    # Non-proxied requests carry the signature in the body
                    # and are posted form-encoded.
                    data["sign"] = self._security.sign(url, data)
                    send_payload = data
                # Do not log requests to protected endpoints while redacting.
                if not Redacted.redacting or endpoint not in PROTECTED_REQUESTS:
                    _LOGGER.debug(
                        "HTTP request %s: %s %s",
                        endpoint,
                        Redacted(headers),
                        Redacted(data, keys=_REDACTED_REQUEST),
                    )
                response = requests.post(
                    url=url,
                    data=send_payload,
                    timeout=self.request_timeout,
                    headers=headers,
                )
                response.raise_for_status()
                if not Redacted.redacting or endpoint not in PROTECTED_RESPONSES:
                    _LOGGER.debug("HTTP response text: %s", Redacted(response.text, 0))
                payload = json.loads(response.text)
            except RequestException as exc:
                # Network/HTTP failure: back off and retry the whole call.
                return self._retry_api_request(
                    endpoint=endpoint,
                    args=args,
                    authenticate=authenticate,
                    key=key,
                    cause=exc,
                )
            _LOGGER.debug(
                "HTTP response: %s",
                Redacted(payload)
                if not Redacted.redacting or endpoint not in PROTECTED_RESPONSES
                else "*** REDACTED ***",
            )
            # Check for errors, raise if there are any
            if str(payload.get(error_code_tag, "0")) != "0":
                # handle_api_error may raise; if it returns, retry instead.
                self.handle_api_error(int(payload[error_code_tag]), payload["msg"])
                # If no exception, then retry
                return self._retry_api_request(
                    endpoint=endpoint,
                    args=args,
                    authenticate=authenticate,
                    key=key,
                    cause=f"{payload['msg']} ({payload[error_code_tag]})",
                )
            # Success: reset the retry counter for the next call.
            self._retries = 0
            result = payload.get(key) if key else payload
            if is_very_verbose():
                _LOGGER.debug(
                    "using key='%s', result=%s",
                    key,
                    Redacted(result)
                    if not Redacted.redacting or endpoint not in PROTECTED_RESPONSES
                    else "*** REDACTED ***",
                )
            return result
def _sleep(self, duration: float) -> None:
sleep(duration * self.sleep_interval)
    def _retry_api_request(
        self,
        endpoint: str,
        args: dict[str, Any] | None = None,
        authenticate: bool = True,
        key: str = "result",
        cause=None,
    ) -> Any:
        """Re-issue a failed API call after a back-off delay.

        Delegates budget checking (and the eventual CloudRequestError)
        to _retry_check, then recurses into api_request.
        NOTE(review): data/req_id/instant from the failed call are not
        forwarded on retry - confirm this is intended for proxied calls.
        """
        self._retry_check(endpoint, cause)
        _LOGGER.debug(
            "Retrying API call %s: %d of %d",
            endpoint,
            self._retries + 1,
            self.max_retries,
        )
        return self.api_request(
            endpoint=endpoint, args=args, authenticate=authenticate, key=key
        )
def _retry_check(self, endpoint: str, cause):
self._retries += 1
if self._retries >= self.max_retries:
self._retries = 0
raise CloudRequestError(
f"Too many retries while calling {endpoint}, last error {cause}"
) from cause if isinstance(cause, BaseException) else None
# wait few seconds before re-sending data, default is 0
self._sleep(self._retries)
def _get_login_id(self) -> None:
"""
Get the login ID from the email address
"""
response = self.api_request(
"/v1/user/login/id/get",
{"loginAccount": self._account},
authenticate=False,
)
self._login_id: str = response["loginId"]
def _get_region(self) -> None:
"""
Gets the region from the email address
"""
response = self.api_request(
"/v1/multicloud/platform/user/route",
{"userName": self._account},
authenticate=False,
)
self._country_code: str = response["countryCode"]
self._id_adapt: str = response["idAdapt"]
if mas_url := response["masUrl"]:
self._api_url = mas_url
    def authenticate(self) -> None:
        """
        Performs a user login with the credentials supplied to the
        constructor

        The session is cached: once logged in, subsequent calls return
        without contacting the server.
        """
        sensitive(self._account, {"length": -2})
        # The proxied API needs the region-specific server resolved first.
        if self._proxied and not self._country_code:
            self._get_region()
        if not self._login_id:
            self._get_login_id()
        if self._session:
            if self._proxied:
                return
            if self._session.get("sessionId") is not None:
                # Don't try logging in again, someone beat this thread to it
                return
        # Log in and store the session
        if self._proxied:
            self._login_proxied()
        else:
            self._login_non_proxied()
    def _login_proxied(self):
        """Log in via the proxied (mobile app style) API.

        Stores the session, uid and access tokens on the client.
        """
        login_id = self._login_id
        stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        # Clear per-login header credentials before attempting the login.
        self._header_access_token = ""
        self._uid = ""
        # Two differently-encrypted forms of the password are required.
        iampwd = self._security.encrypt_iam_password(login_id, self._password)
        sensitive(iampwd)
        password = self._security.encrypt_password(login_id, self._password)
        sensitive(password)
        self._session: dict = self.api_request(
            "/mj/user/login",
            instant=None,
            data={
                "data": {
                    "appKey": self._appkey,
                    "appVersion": _PROXIED_APP_VERSION,
                    "osVersion": _PROXIED_SYS_VERSION,
                    "platform": "2",
                },
                "iotData": {
                    "appId": str(self._appid),
                    "appVNum": _PROXIED_APP_VERSION,
                    "appVersion": _PROXIED_APP_VERSION,
                    "clientType": CLOUD_API_CLIENT_TYPE,
                    "clientVersion": _PROXIED_APP_VERSION,
                    "format": CLOUD_API_FORMAT,
                    "language": CLOUD_API_LANGUAGE,
                    "iampwd": iampwd,
                    "loginAccount": self._account,
                    "password": password,
                    "pushToken": self._pushtoken,
                    "pushType": "4",
                    "reqId": token_hex(16),
                    "retryCount": "3",
                    "src": "10",
                    "stamp": stamp,
                },
                "reqId": token_hex(16),
                "stamp": stamp,
            },
            authenticate=False,
        )
        self._uid = str(self._session.get("uid"))
        _LOGGER.debug("UID=%s", self._uid)
        sensitive(self._uid)
        # mdata carries the header access token for subsequent requests.
        if mdata := self._session.get("mdata"):
            self._header_access_token = mdata["accessToken"]
            sensitive(self._header_access_token)
        self._security.set_access_token(
            str(self._session.get("accessToken")),
            str(self._session.get("randomData")),
        )
def _login_non_proxied(self):
password = self._security.encrypt_password(self._login_id, self._password)
sensitive(password)
self._session: dict = self.api_request(
"/v1/user/login",
{
"loginAccount": self._account,
"password": password,
},
authenticate=False,
)
if not self._session.get("sessionId"):
raise AuthenticationError("Unable to retrieve session id from Midea API")
sensitive(str(self._session.get("sessionId")))
self._security.access_token = str(self._session.get("accessToken"))
def get_lua_script(
self,
manufacturer="0000",
appliance_type="0xA1",
model="0",
serial_number=None,
version="0",
):
"""Retrieves Lua script used by mobile app"""
response: dict = self.api_request(
"/v1/appliance/protocol/lua/luaGet",
{
"iotAppId": DEFAULT_APP_ID,
"applianceMFCode": manufacturer,
"applianceType": appliance_type,
"modelNumber": model,
"applianceSn": serial_number,
"version": version,
},
key="",
)
if response | |
represent and save method
#=========================================================
def Display_Steps(Step_number, StartPos=1, del_Method=False, cur_step=None):
    """Create the widgets that define each segment of the method.

    Only the segment-type combobox is displayed here; once a type is
    selected, callback() adds the fields specific to that type.

    # Step_number: number of segments defined by the user
    # StartPos: position at which the step is added
    # del_Method: if True, delete the current method from the GUI before
    #   displaying the new steps (new method); if False, keep it (add segment)
    # cur_step: initial combobox index per segment (all 0 when omitted)
    """
    global Combobox_Type
    global Combobox_Stage
    global entry_Value
    global entry_Velocity
    global entry_Time
    # Fix: the default used to be the mutable literal [0]*100, shared
    # across calls; build it per call instead.
    if cur_step is None:
        cur_step = [0] * 100
    # Delete previous method if new method is created
    if del_Method:
        for label in frame_method_def.grid_slaves():
            if int(label.grid_info()["row"]) > 1:
                label.grid_forget()
    # Column header for the segment type. Fix: previously created once per
    # loop iteration, stacking identical labels on the same grid cell.
    label_Type = tk.Label(frame_method_def, text="Type")
    label_Type.grid(row=2, column=1, padx=5, pady=5)
    # Add widgets for type of segments (+ segment number)
    for i in range(Step_number):
        label_StepNumber = tk.Label(frame_method_def, text=("Segment {}".format(StartPos+i)))
        label_StepNumber.grid(row=StartPos+i+3, column=0, padx=5, pady=5)
        Combobox_Type[StartPos+i-1] = ttk.Combobox(frame_method_def, state="readonly", values=values_Combobox_Type)
        Combobox_Type[StartPos+i-1].current(cur_step[i])
        Combobox_Type[StartPos+i-1].grid(row=StartPos+i+3, column=1, padx=5, pady=5)
        Combobox_Type[StartPos+i-1].bind('<<ComboboxSelected>>', lambda event, arg=StartPos+i-1: callback(event, arg))
#--------------------------------------------------------------------------------------------------------
def Start_Method(Step_number):
    """Begin a brand-new method containing Step_number empty segments.

    # Step_number: number of the segments in the method
    """
    global Combobox_Type
    global Combobox_Stage
    global entry_Value
    global entry_Velocity
    global entry_Time
    # One widget slot per segment, all initially empty.
    Combobox_Type, Combobox_Stage, entry_Value, entry_Velocity, entry_Time = (
        [None] * Step_number for _ in range(5)
    )
    # Reset the graph; a new method defaults to Z stage movement control.
    ax.clear()
    ax.set_xlabel('Time (s)', fontsize=14)
    ax.set_ylabel('Displacement (mm)', fontsize=14)
    ax.set_title('Z stage movement', fontsize=18)
    ax.grid()
    # Show the type selector for every segment.
    Display_Steps(Step_number, del_Method=True)
#--------------------------------------------------------------------------------------------------------
def Add_Step():
    """Append one empty segment after the segments already defined."""
    global Number_Steps
    global Combobox_Type
    global Combobox_Stage
    global entry_Value
    global entry_Velocity
    global entry_Time
    # Grow every per-segment widget list by one empty slot.
    for widget_list in (Combobox_Type, Combobox_Stage, entry_Value,
                        entry_Velocity, entry_Time):
        widget_list.append(None)
    # Bump the segment counter shown in the GUI...
    new_count = Number_Steps.get() + 1
    Number_Steps.set(new_count)
    # ...and create the widgets for the extra segment.
    Display_Steps(1, StartPos=new_count)
#--------------------------------------------------------------------------------------------------------
def Remove_Step():
    """Drop the last segment of the method and refresh the graph."""
    global Number_Steps
    global Combobox_Type
    global Combobox_Stage
    global entry_Value
    global entry_Velocity
    global entry_Time
    # Forget the widgets sitting on the last segment's grid row.
    last_row = 3 + Number_Steps.get()
    for widget in frame_method_def.grid_slaves():
        if int(widget.grid_info()["row"]) == last_row:
            widget.grid_forget()
    Number_Steps.set(Number_Steps.get() - 1)
    # Drop the last slot from every per-segment widget list.
    for widget_list in (Combobox_Type, Combobox_Stage, entry_Value,
                        entry_Velocity, entry_Time):
        widget_list.pop()
    # Redraw without the removed segment.
    PlotMethod()
#--------------------------------------------------------------------------------------------------------
def Import_Method():
    """Load an existing method from a YAML file chosen by the user.

    Delegates to Display_Method() to populate the widgets and to
    PlotMethod() (called by it) to draw the segments.
    """
    # Open a method file
    name = askopenfilename(initialdir=Method_Dir_Std, filetypes =(("YAML files", "*.yaml"),("All Files","*.*")), title = "Choose a file.")
    # Read the file and create dictionary List.
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # execute arbitrary Python from a crafted file; method files are plain data.
    with open(name,'r') as UseFile:
        List = yaml.safe_load(UseFile)
    # Display content of the method in GUI
    Display_Method(List)
#--------------------------------------------------------------------------------------------------------
def Display_Method(List):
    """Display the method defined by List (used when a method is imported
    or a list is created).

    # List: dict mapping segment index -> dict with keys 'Type' and,
    #   depending on the type: 'Control', 'Stage', 'Value', 'Velocity',
    #   'Duration'.  Indices are assumed to be 0..len(List)-1.
    """
    global Number_Steps
    global Combobox_MethodType
    global Combobox_Type
    global Combobox_Stage
    global entry_Value
    global entry_Velocity
    global entry_Time
    # Create empty widgets depending on the number of segments
    NumberSteps = len(List)
    Combobox_Type = [None]*NumberSteps
    Combobox_Stage = [None]*NumberSteps
    entry_Value = [None]*NumberSteps
    entry_Velocity = [None]*NumberSteps
    entry_Time = [None]*NumberSteps
    Current_Steps = [None]*NumberSteps
    values_Steps = np.array(values_Combobox_Type)
    values_Stages = np.array(['X','Y','Z'])
    # Resolve each segment's type string to its combobox index.
    for item, doc in List.items():
        Current_Steps[item] = np.where(values_Steps == doc.get('Type'))[0][0]
    # Rebuild the segment rows with the imported type preselected.
    Display_Steps(NumberSteps, del_Method = True, cur_step = Current_Steps)
    # Add the values of each widget as defined in List
    for item, doc in List.items():
        if (doc.get('Type') == values_Steps[1]): # If Move stage
            # Align the method-type combobox with the imported control mode.
            Current_Control = np.where(np.array(Combobox_MethodType['values']) == doc.get('Control'))[0][0]
            Combobox_MethodType.current(Current_Control)
            label_Stage = tk.Label(frame_method_def, text="Stage")
            label_Stage.grid(row=4+item, column=2, padx=5, pady=5)
            Combobox_Stage[item] = ttk.Combobox(frame_method_def, state="readonly", values = ('X','Y','Z'))
            Current_Stage = np.where(values_Stages == doc.get('Stage'))[0][0]
            Combobox_Stage[item].current(Current_Stage)
            Combobox_Stage[item].grid(row=4+item, column=3, padx=5, pady=5)
            # Value label text depends on the control mode.
            Current_Value = values_LabelValue[Current_Control]
            label_Value = tk.Label(frame_method_def, text=Current_Value)
            label_Value.grid(row=4+item, column=4, padx=5, pady=5)
            entry_Value[item] = tk.Entry(frame_method_def)
            entry_Value[item].insert(0,doc.get('Value'))
            entry_Value[item].grid(row=4+item, column=5, padx=5, pady=5)
            entry_Value[item].bind("<Return>", PlotMethod)
            label_Time = tk.Label(frame_method_def, text="Duration (s)")
            label_Time.grid(row=4+item, column=6, padx=5, pady=5)
            entry_Time[item] = tk.Entry(frame_method_def)
            entry_Time[item].insert(0,doc.get('Duration'))
            entry_Time[item].grid(row=4+item, column=7, padx=5, pady=5)
            entry_Time[item].bind("<Return>", PlotMethod)
        if (doc.get('Type') == values_Steps[2]): # If Hold
            label_Time = tk.Label(frame_method_def, text="Duration (s)")
            label_Time.grid(row=4+item, column=2, padx=5, pady=5)
            entry_Time[item] = tk.Entry(frame_method_def)
            entry_Time[item].insert(0,doc.get('Duration'))
            entry_Time[item].grid(row=4+item, column=3, padx=5, pady=5)
            entry_Time[item].bind("<Return>", PlotMethod)
        if (doc.get('Type') == values_Steps[3]): # If Preload
            label_Stage = tk.Label(frame_method_def, text="Stage")
            label_Stage.grid(row=4+item, column=2, padx=5, pady=5)
            Combobox_Stage[item] = ttk.Combobox(frame_method_def, state="readonly", values = ('X','Y','Z'))
            Current_Stage = np.where(values_Stages == doc.get('Stage'))[0][0]
            Combobox_Stage[item].current(Current_Stage)
            Combobox_Stage[item].grid(row=4+item, column=3, padx=5, pady=5)
            label_Force = tk.Label(frame_method_def, text="Force (mN)")
            label_Force.grid(row=4+item, column=4, padx=5, pady=5)
            entry_Value[item] = tk.Entry(frame_method_def)
            entry_Value[item].insert(0,doc.get('Value'))
            entry_Value[item].grid(row=4+item, column=5, padx=5, pady=5)
            label_Velocity = tk.Label(frame_method_def, text="Velocity (mm/s)")
            label_Velocity.grid(row=4+item, column=6, padx=5, pady=5)
            entry_Velocity[item] = tk.Entry(frame_method_def)
            entry_Velocity[item].insert(0,doc.get('Velocity'))
            entry_Velocity[item].grid(row=4+item, column=7, padx=5, pady=5)
            label_Time = tk.Label(frame_method_def, text="Duration (s)")
            label_Time.grid(row=4+item, column=8, padx=5, pady=5)
            entry_Time[item] = tk.Entry(frame_method_def)
            entry_Time[item].insert(0,doc.get('Duration'))
            entry_Time[item].grid(row=4+item, column=9, padx=5, pady=5)
            entry_Time[item].bind("<Return>", PlotMethod)
    Number_Steps.set(len(List))
    # Plot the method
    PlotMethod(key=None)
#-------------------------------------------------------------------------------------------------------------------------
def callback(event, arg):
    """Display the parameter fields for a segment once its type is chosen.

    Called by Display_Steps via the combobox <<ComboboxSelected>> binding.
    After a field is filled and Return is pressed, PlotMethod redraws the
    graphical representation of the method.

    # event: the combobox selection event (event.widget is the type combobox)
    # arg: 0-based segment index; the segment occupies grid row 4+arg
    """
    global Combobox_MethodType
    global Combobox_Stage
    global Combobox_Control  # NOTE(review): declared global but never used here
    global entry_Value
    global entry_Velocity
    global entry_Time
    # Delete previous line when type of segment (move / hold / Preload) is changed
    for label in frame_method_def.grid_slaves():
        if ((int(label.grid_info()["row"]) == 4+arg) and int(label.grid_info()["column"])>1):
            label.grid_forget()
    # Define the labels depending on the method (Stage movement, displacement or force)
    Current_Value_Index = np.where(Combobox_MethodType.get() == np.array(Combobox_MethodType['values']))[0][0]
    Current_Value = values_LabelValue[Current_Value_Index]
    # Types of segments (move stage / hold / preload)
    values_Steps = np.array(values_Combobox_Type) # Type
    # Add widgets according to the type of segment
    if (event.widget.get() == values_Steps[1]): # if move stage
        label_Stage = tk.Label(frame_method_def, text="Stage")
        label_Stage.grid(row=4+arg, column=2, padx=5, pady=5)
        Combobox_Stage[arg] = ttk.Combobox(frame_method_def, state="readonly", values = ('X','Y','Z'))
        Combobox_Stage[arg].current(2)  # default to the Z stage
        Combobox_Stage[arg].grid(row=4+arg, column=3, padx=5, pady=5)
        Label_Value = tk.Label(frame_method_def, text=Current_Value)
        Label_Value.grid(row=4+arg, column=4, padx=5, pady=5)
        entry_Value[arg] = tk.Entry(frame_method_def)
        entry_Value[arg].grid(row=4+arg, column=5, padx=5, pady=5)
        entry_Value[arg].bind("<Return>", PlotMethod)
        label_Time = tk.Label(frame_method_def, text="Duration (s)")
        label_Time.grid(row=4+arg, column=6, padx=5, pady=5)
        entry_Time[arg] = tk.Entry(frame_method_def)
        entry_Time[arg].grid(row=4+arg, column=7, padx=5, pady=5)
        entry_Time[arg].bind("<Return>", PlotMethod)
    if (event.widget.get() == values_Steps[2]): # if hold
        label_Time = tk.Label(frame_method_def, text="Duration (s)")
        label_Time.grid(row=4+arg, column=2, padx=5, pady=5)
        entry_Time[arg] = tk.Entry(frame_method_def)
        entry_Time[arg].grid(row=4+arg, column=3, padx=5, pady=5)
        entry_Time[arg].bind("<Return>", PlotMethod)
    if (event.widget.get() == values_Steps[3]): # if preload
        label_Stage = tk.Label(frame_method_def, text="Stage")
        label_Stage.grid(row=4+arg, column=2, padx=5, pady=5)
        Combobox_Stage[arg] = ttk.Combobox(frame_method_def, state="readonly", values = ('X','Y','Z'))
        Combobox_Stage[arg].current(2)  # default to the Z stage
        Combobox_Stage[arg].grid(row=4+arg, column=3, padx=5, pady=5)
        label_Force = tk.Label(frame_method_def, text="Force (mN)")
        label_Force.grid(row=4+arg, column=4, padx=5, pady=5)
        entry_Value[arg] = tk.Entry(frame_method_def)
        entry_Value[arg].grid(row=4+arg, column=5, padx=5, pady=5)
        label_Velocity = tk.Label(frame_method_def, text="Velocity (mm/s)")
        label_Velocity.grid(row=4+arg, column=6, padx=5, pady=5)
        entry_Velocity[arg] = tk.Entry(frame_method_def)
        entry_Velocity[arg].grid(row=4+arg, column=7, padx=5, pady=5)
        entry_Velocity[arg].bind("<Return>", PlotMethod)
        label_Time = tk.Label(frame_method_def, text="Duration (s)")
        label_Time.grid(row=4+arg, column=8, padx=5, pady=5)
        entry_Time[arg] = tk.Entry(frame_method_def)
        entry_Time[arg].grid(row=4+arg, column=9, padx=5, pady=5)
        entry_Time[arg].bind("<Return>", PlotMethod)
#-------------------------------------------------------------------------------------------------------------------------
def PlotMethod(key=None):
    """Collect the segment values and call Plot_Graph to represent the
    method (only stage Z).  Preload segments are not represented - only
    the holding of the preload.

    # key: the Return-key event when bound to an Entry, or None when
    #   called directly (e.g. after importing a method)
    """
    global ax
    global canvas
    global List_Types
    global List_Value
    global List_Vel
    global List_Time
    # NOTE(review): the globals below are X_Time/Y_Disp but the variables
    # assigned in this function are X_time/Y_disp - the declarations have
    # no effect and the results stay local.  Confirm which is intended.
    global X_Time
    global Y_Disp
    # Create empty lists with length of number of segments (+1 for X and Y)
    List_Types = [None] * Number_Steps.get()
    List_Value = [None] * Number_Steps.get()
    List_Vel = [None] * Number_Steps.get()
    List_Time = [None] * Number_Steps.get()
    X_time = [None] * (Number_Steps.get()+1)
    X_time[0] = 0
    Y_disp = [None] * (Number_Steps.get()+1)
    Y_disp[0] = 0
    values_Steps = np.array(values_Combobox_Type) # Types of segments (move stage / hold / preload)
    # Add values in the list
    for i in range(Number_Steps.get()):
        try:
            List_Types[i] = Combobox_Type[i].get()
            if (List_Types[i]==values_Steps[1]): # if move stage
                List_Value[i] = (float(entry_Value[i].get()))
                # NOTE(review): velocity is derived from value/duration,
                # not read from entry_Velocity - confirm this is intended.
                List_Vel[i] = abs(float(entry_Value[i].get())/float(entry_Time[i].get())) #(float(entry_Velocity[i].get()))
                List_Time[i] = (float(entry_Time[i].get()))
                X_time[i+1] = X_time[i] + List_Time[i]
                # Only Z-stage moves change the plotted displacement.
                if (Combobox_Stage[i].get() == 'Z'):
                    Y_disp[i+1] = List_Value[i] + Y_disp[i]
                else:
                    Y_disp[i+1] = Y_disp[i]
            else:
                # Hold / preload: time advances, displacement is flat.
                List_Value[i] = 'na'
                List_Vel[i] = 'na'
                List_Time[i] = (float(entry_Time[i].get()))
                X_time[i+1] = X_time[i] + List_Time[i]
                Y_disp[i+1] = Y_disp[i]
        except:
            # Bare except used as control flow: an empty/partial widget
            # stops the scan, leaving trailing entries as None.
            break
    try:
        # Index of the first still-undefined point (None in both lists).
        index = np.where((np.array(X_time)==None)&(np.array(Y_disp)==None))[0][0]
    except:
        # No None entries: every segment is fully defined.
        index = len(Y_disp)
    Plot_Graph(X_time, Y_disp, ax, canvas, index)
#-------------------------------------------------------------------------------------------------------------------------
def Plot_Graph(X_time, Y_disp, ax, canvas, index=None):
    """Plot the segments of the method (stage Z only) on ax/canvas.

    Depending on the method type (stage movement / displacement control /
    force control) the y axis shows stage movement, displacement or force.
    The function does not represent the preload - only the holding of it.

    # X_time: list of time points (may contain trailing None entries)
    # Y_disp: list of stage movement / displacement / force values
    # ax, canvas: figure axes and canvas in which the graph is plotted
    # index: number of leading, fully defined entries of X_time/Y_disp
    """
    global Combobox_MethodType
    Type_Method = Combobox_MethodType.get()
    ax.clear()
    line, = ax.plot(X_time, Y_disp, '.-') #tuple of a single element
    try:
        # Fit the axes to the defined part of the data.
        ax.set_xlim(min(X_time[0:index]), max(X_time[0:index]))
        ax.set_ylim(min(Y_disp[0:index])-1, max(Y_disp[0:index])+1)
    except (ValueError, TypeError):
        # Fix: was a bare except.  ValueError: empty slice; TypeError:
        # None entries in the slice.  Fall back to a default window.
        ax.set_xlim(0, 10)
        ax.set_ylim(-5, 5)
    ax.set_xlabel('Time (s)', fontsize=14)
    # Axis labels and title follow the selected method type.
    if Type_Method == Combobox_MethodType['values'][0]:
        ax.set_ylabel('Displacement (mm)', fontsize=14)
        ax.set_title('Z stage movement', fontsize=16)
    if Type_Method == Combobox_MethodType['values'][1]:
        ax.set_ylabel('Displacement (um)', fontsize=14)
        ax.set_title('Displacement', fontsize=16)
    if Type_Method == Combobox_MethodType['values'][2]:
        ax.set_ylabel('Force (mN)', fontsize=14)
        ax.set_title('Force', fontsize=16)
    ax.grid()
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
#-------------------------------------------------------------------------------------------------------------------------
def GetMethod():
"""The function GetMethod() creates a dictionary containing the method,
and the function SaveMethod() saves the different segments in a yaml format
"""
global Combobox_MethodType
global List_Types
global List_Stages
global List_Value
global List_Vel
global List_Time
global Dict_Grid
global Dict_Final
global Cur_Point
List_Types = [None] * Number_Steps.get()
List_Stages = [None] * Number_Steps.get()
List_Value = [None] * Number_Steps.get()
List_Vel = [None] * Number_Steps.get()
List_Time = [None] * Number_Steps.get()
Name_method = Combobox_MethodType.get()
values_Steps | |
<reponame>jt112/SymplecticElements<filename>hrDataFeeder.py
#!/usr/bin/env python
# This script queries the Faculty Data Repository (FDR) and the Service Directory (SD) for faculty HR data to feed Symplectic Elements.
# For FDR records without email addresses it can query the LDAP Service Directory using the python util package
# from https://intranet.lib.duke.edu/download/python/
# There are few tricky spots in here due to the untrustworthiness of the FDR data, which the FDR people cannot or will not fix.
import cx_Oracle
import logging
import logging.handlers
from os.path import join
from os import getcwd, environ
from sys import exit
from djangoutil.xmlrpc import getServerProxy
from xml.sax.saxutils import escape
from ConfigParser import SafeConfigParser
import codecs
import io
from django.conf import settings
from djangoutil import config
settings.configure(config)
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# set to UTF-8 to capture diacritics
environ['NLS_LANG']= 'AMERICAN_AMERICA.AL32UTF8'
# database configuration
usedb = 'test' # choose database: dev, test, or prod
config = SafeConfigParser()
config.read(join(getcwd(), 'fdr.config')) # read config file to gather parameters
dbhost = config.get(usedb, 'dbhost')
dbport = config.get(usedb, 'dbport')
dbsid = config.get(usedb, 'dbsid')
dbuser = config.get(usedb, 'dbuser')
dbpassword = config.get(usedb, 'dbpassword')
useldapforemail = False # LDAP is slow and hasn't returned significant number of emails. If False, use <EMAIL> instead.
# Input/output paths (all relative to the working directory).
sd_file = join(getcwd(), 'libsymel.dat') # Nightly export of Service Directory data
xmlfile = join(getcwd(), 'people.xml') # Output file for Symplectic Elements consumption
affiliationsfile = join(getcwd(), 'affiliations.txt') # Output file for unique affiliations to populate Elements Auto Groups
# instantiate and configure logger
logfile = join(getcwd(), 'hrDataFeeder.log')
logger = logging.getLogger('fdrlogger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=20971520, backupCount=5) # limit to 6 files of 20 MB or less
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# retrieve results from Faculty Data Repository (FDR)
# retrieve results from Faculty Data Repository (FDR)
def getResults(ora, sql):
    # Run one query on the Oracle connection and return all rows.
    cursor = ora.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    cursor.close()
    return rows
# Take list of dictionaries and build XML elements. Return string.
def buildXml(list):
sequence_dict = {1:'Secondary', 2:'Tertiary', 3:'Quaternary', 4:'Quinary', 5:'Senary', 6:'Septenary', 7:'Octonary', 8:'Nonary', 9:'Denary'}
xml = ''
for record in list:
xml += '\t\t<person>\n'
xml += '\t\t\t<Lastname>%s</Lastname>\n' % (record['surname'])
xml += '\t\t\t<Firstname>%s</Firstname>\n' % (record['forename'])
try:
xml += '\t\t\t<Middlename>%s</Middlename>\n' % (record['middlename'])
except:
pass
xml += '\t\t\t<Email>%s</Email>\n' % (record['email']) # removing angle brackets in some email fields
xml += '\t\t\t<Proprietary_ID>%s</Proprietary_ID>\n' % (record['duid'])
xml += '\t\t\t<Username>%s</Username>\n' % (record['netid'])
xml += '\t\t\t<PrimaryGroupDescriptor>%s</PrimaryGroupDescriptor>\n' % (escape(record['primary']))
# this must change in response to addition of school
if 'secondary' in record:
if len(record['secondary']) > 0:
i = 1
for appointment in record['secondary']:
xml += '\t\t\t<%sGroupDescriptor>%s</%sGroupDescriptor>\n' % (sequence_dict[i], escape(appointment.strip()), sequence_dict[i])
i += 1
xml += '\t\t\t<IsAcademic>%s</IsAcademic>\n' % (record['academic'])
xml += '\t\t\t<LoginAllowed>%s</LoginAllowed>\n' % (record['login'])
xml += '\t\t\t<AuthenticatingAuthority>%s</AuthenticatingAuthority>\n' % (record['authority'])
xml += '\t\t</person>\n'
return xml
# Build list of dictionaries of FDR people. Also return list of Duke Unique IDs.
def buildFdrDict(data, rpcserver, sd_dict_list):
print 'buildFdrDict'
fdr_dict_list = []
# CHANGE THIS. DROP FDR RECORD WITHOUT NETID, USE NETID as KEY
netid_list = []
missing_fdr_email = 0
missing_email_found_sd = 0
for record in data:
drop_record = False
fdr_dict = {}
try: # Confusing. FDR forced their names on us. Their PRIMARY_SCHOOL is our primary group, all other groups are secondary for us.
duid, netid, salutation, surname, forename, middlename, lsurname, lforename, lmiddlename, email, primary, school, secondary, primary_affiliation = record
except ValueError:
logmessage = 'Database view has changed.'
logger.critical(logmessage)
exit()
if not netid: # Some people records do not contain netid. Look in SD file. If not there, log and discard person.
logmessage = 'Record dropped - No NetID in FDR. %s %s, %s' % (forename, surname, duid)
logger.critical(logmessage)
print logmessage
drop_record = True
continue
else:
pass
# for person in sd_dict_list: # Look through SD records
# if duid == person['duid']: # If DUID matches...
# print person
# netid = person['netid'] # Assign SD netid to person
# logmessage = "Found FDR person %s missing netid." % (duid)
# logger.info(logmessage)
# print logmessage
# break
# else: # If also no netid in SD, log and set flag to drop this record.
# logmessage = "Person %s missing netid in FDR and SD." % (duid)
# logger.critical(logmessage)
# print logmessage
# drop_record = True
if surname: # If professional name set, use that. Otherwise fall back to legal name.
fdr_dict['surname'] = surname
fdr_dict['forename'] = forename
if middlename: # Many records do not contain middle name.
fdr_dict['middlename'] = middlename
else: # Legal name block
fdr_dict['surname'] = lsurname
fdr_dict['forename'] = lforename
if lmiddlename:
fdr_dict['middlename'] = lmiddlename
if not email: # Some people do not have email addresses for some reason that I cannot comprehend.
missing_fdr_email += 1
if not drop_record: # If there's no netid, there's no point in continuing with this record.
for person in sd_dict_list: # Look through SD records
if duid == person['duid']: # If DUID matches...
email = person['email'] # Assign SD netid to person
logmessage = "FDR person %s missing email found in Service Directory." % (duid)
missing_email_found_sd += 1
#print logmessage
#logger.info(logmessage)
break
else:
email = person['email']
email = email.translate(None, "<>") # Remove angle brackets present in some email fields
#email = netid + "@duke.edu"
fdr_dict['email'] = email
fdr_dict['duid'] = duid
fdr_dict['netid'] = netid
fdr_dict['primary'] = school
# Non-primary appointments. Convert double-pipe delimited string to list and add PRIMARY_VIVO_ORG to that.
secondary_deduped_list = [] # Deduplicate the secondary appointments. Often duplicates.
if secondary:
secondary = secondary.strip() # Remove EOL character.
if '||' in secondary: # Double pipes indicates concatenated result.
secondary_list = secondary.split('||') # Split results into list
for appt in secondary_list:
if (appt not in secondary_deduped_list) and (appt != school): # Don't want school twice.
secondary_deduped_list.append(appt)
elif secondary != school: # Single result, dedupe against school.
secondary_deduped_list.append(secondary)
if (primary not in secondary_deduped_list) and (primary != school): # Dedupe primary against secondary appts and school.
secondary_list.append(primary)
fdr_dict['secondary'] = secondary_deduped_list
fdr_dict['academic'] = 'Y'
fdr_dict['login'] = 'Y'
fdr_dict['authority'] = 'Shibboleth'
netid_list.append(netid)
if not drop_record:
fdr_dict_list.append(fdr_dict)
else: # Discard this record and log.
logmessage = 'Record dropped for DUID:%s Forename: %s Surname: %s' % (duid, forename, surname)
logger.info(logmessage)
if missing_fdr_email > 0:
logmessage = '%s FDR records without email addresses' % (missing_fdr_email)
logger.info(logmessage)
print '%s people missing FDR email found in SD' % (missing_email_found_sd)
return fdr_dict_list, netid_list
# Build list of dictionaries of service directory entries after deduplicating people from FDR
def buildSdDict(sd_file):
    """Parse the Service Directory dump into a list of person dicts.

    Args:
        sd_file: Path to a pipe-delimited SD export whose fields are
            duid|netid|surname|forename|email|status (one record per line).

    Returns:
        List of dicts shaped like the FDR person records ('duid', 'netid',
        'surname', 'forename', 'primary', 'academic', 'login', 'authority',
        'email').
    """
    sd_dict_list = []
    sd_missing_email = 0
    # 'with' guarantees the file handle is closed even on a parse error
    # (the old code left it open if line.split() raised).
    with open(sd_file, 'r') as sd:
        for line in sd:
            sd_dict = {}
            duid, netid, surname, forename, email, status = line.split('|')
            sd_dict['duid'] = duid
            sd_dict['netid'] = netid
            sd_dict['surname'] = surname
            sd_dict['forename'] = forename
            sd_dict['primary'] = status.strip()  # Remove line break
            sd_dict['academic'] = 'N'
            sd_dict['login'] = 'Y'
            sd_dict['authority'] = 'Shibboleth'
            if email:
                # Remove angle brackets present in some email fields.
                email = email.translate(None, "<>")
                sd_dict['email'] = email
            else:
                # No email in SD: synthesise the standard campus address.
                sd_dict['email'] = netid + '@duke.edu'
                sd_missing_email += 1
            sd_dict_list.append(sd_dict)
    # The old 'duplicates' counter was always zero here; dropped along with
    # the leftover debug prints ("print '1'" etc.).
    logmessage = 'Found %s Service Directory records.' % (len(sd_dict_list))
    logger.info(logmessage)
    logmessage = '%s Service Directory records without email addresses' % (sd_missing_email)
    logger.info(logmessage)
    return sd_dict_list
# Deduplicate the SD people to prevent creating multiple accounts as some will appear in FDR data.
def dedupeSdDictList(sd_dict_list, netid_list):
    """Drop SD records for people already present in the FDR data.

    Args:
        sd_dict_list: List of SD person dicts (see buildSdDict).
        netid_list: Iterable of netids already harvested from the FDR.

    Returns:
        List of SD dicts whose netid does not appear in netid_list.
    """
    known_netids = set(netid_list)  # O(1) membership instead of O(n) list scans.
    duplicates = 0
    sd_dict_list_dedupe = []
    for record in sd_dict_list:
        if record['netid'] not in known_netids:  # Deduplicate against FDR records.
            sd_dict_list_dedupe.append(record)
        else:
            # Bug fix: the counter previously incremented for *kept* records,
            # so the log line reported the number of non-duplicates.
            duplicates += 1
    logmessage = "Found %s Service record duplicates." % (duplicates)
    logger.info(logmessage)
    return sd_dict_list_dedupe
# Serialize list of unique affiliations to populate Elements Auto Groups
def getUniqueAffiliations(fdr_dict_list):
    """Collect the unique secondary affiliations across all FDR records.

    Args:
        fdr_dict_list: List of FDR person dicts; each may carry a
            'secondary' key holding a list of affiliation strings.

    Returns:
        List of unique affiliation strings in first-seen order.
    """
    unique_affiliations = []  # Renamed: the old local shadowed builtin 'dict'.
    seen = set()              # O(1) membership; the list kept O(n^2) scans.
    for record in fdr_dict_list:
        for affiliation in record.get('secondary', ()):
            if affiliation not in seen:
                seen.add(affiliation)
                unique_affiliations.append(affiliation)
    return unique_affiliations
if __name__=='__main__':
try:
logmessage = "Starting update." # Begin logging
logger.info(logmessage)
dbdsn = cx_Oracle.makedsn(dbhost, dbport, dbsid) # Open the connection to the FDR database
try:
ora = cx_Oracle.connect(dbuser, dbpassword, dbdsn)
except:
logmessage = 'Database connection error.'
logger.critical(logmessage)
exit()
sql = 'select DUID, NETID, SALUTATION, SURNAME, FIRSTNAME, MIDDLENAME, LEGAL_SURNAME, LEGAL_FIRSTNAME, LEGAL_MIDDLENAME, EMAIL, PRIMARY_VIVO_ORG, PRIMARY_SCHOOL, affiliations, PRIMARY_AFFILIATION from APT.V_PEOPLE_WITH_AFFILIATIONS'
data = getResults(ora, sql) # Query FDR. data is a list of tuples, 1 tuple per record.
logmessage = 'Found %s FDR faculty.' % (len(data))
logger.info(logmessage)
ora.close()
xml_preabmle = '<?xml version="1.0" encoding="UTF-8" ?>\n<HR_Data>\n' # Begin the XML string to write to people.xml
xml_preabmle | |
import functools
import six
from six.moves import builtins
import string
import threading
import warnings
import numpy
from cupy.core import core
from cupy import creation
from cupy import logic
from cupy import math
from cupy import sorting
from cupy import statistics
# Per-thread storage for the fusion tracing state, so concurrent fusions
# do not interleave their traces.
_thread_local = threading.local()
class FusionOp(object):
    """Record of one elementwise operation captured while tracing a fusion.

    All constructor arguments are kept as public attributes; the code
    generator reads them directly when emitting CUDA source.
    """

    def __init__(self, name, operation, param_names,
                 nin, nout, in_vars, out_vars, types, num):
        self.name = name
        self.operation = operation
        self.param_names = param_names
        self.nin = nin
        self.nout = nout
        self.in_vars = in_vars
        self.out_vars = out_vars
        self.types = types
        self.num = num

    def __repr__(self):
        type_names = ', '.join(t.name for t in self.types)
        return "<FusionOp, name={}, types=[{}]>".format(self.name, type_names)
class _FusionVar(object):
def __init__(self, num, ty, const=None):
self.num = num
self.ty = ty
self.const = const
def __repr__(self):
return "<_FusionVar, num={}, ty={}, const={}>".format(
self.num, self.ty, self.const)
class _FusionMem(object):
def __init__(self, var_list):
self.op_list = []
self.var_list = var_list[:]
def __repr__(self):
return "<_FusionMem, op_list={}, var_list={}>".format(
self.op_list,
self.var_list)
def get_fresh(self, ty, **kwargs):
n = len(self.var_list)
ret = _FusionVar(n, ty, **kwargs)
self.var_list.append(ret)
return ret
def set_op(self, name, operation, param_names,
nin, nout, in_vars, out_vars, types):
num = len(self.op_list)
op = FusionOp(name, operation, param_names,
nin, nout, in_vars, out_vars, types, num)
self.op_list.append(op)
class _FusionRef(object):
    """Handle to a traced value inside a function being fused.

    The overloaded operators do not compute anything; they invoke the
    module-level ufunc wrappers (add, subtract, ... defined elsewhere in
    this file), which record the operation into the shared _FusionMem
    trace and return new references.  In-place forms (__iadd__ etc.) pass
    ``self`` as the explicit output operand.
    """

    def __init__(self, var, mem):
        self._var = var       # underlying _FusionVar
        self.dtype = var.ty   # mirrors ndarray.dtype for user code
        self._mem = mem       # shared trace memory

    def __repr__(self):
        return "<_FusionRef, dtype=%s>" % self.dtype

    def __neg__(self):
        return negative(self)

    def __add__(self, other):
        return add(self, other)

    def __iadd__(self, other):
        return add(self, other, self)

    def __radd__(self, other):
        return add(other, self)

    def __sub__(self, other):
        return subtract(self, other)

    def __isub__(self, other):
        return subtract(self, other, self)

    def __rsub__(self, other):
        return subtract(other, self)

    def __mul__(self, other):
        return multiply(self, other)

    def __imul__(self, other):
        return multiply(self, other, self)

    def __rmul__(self, other):
        return multiply(other, self)

    # __div__/__idiv__/__rdiv__ are the Python 2 division hooks; the
    # __truediv__ family below serves Python 3 (and Py2 with
    # `from __future__ import division`).
    def __div__(self, other):
        return divide(self, other)

    def __idiv__(self, other):
        return divide(self, other, self)

    def __rdiv__(self, other):
        return divide(other, self)

    def __truediv__(self, other):
        return true_divide(self, other)

    def __itruediv__(self, other):
        return true_divide(self, other, self)

    def __rtruediv__(self, other):
        return true_divide(other, self)

    def __floordiv__(self, other):
        return floor_divide(self, other)

    def __ifloordiv__(self, other):
        return floor_divide(self, other, self)

    def __rfloordiv__(self, other):
        return floor_divide(other, self)

    def __mod__(self, other):
        return remainder(self, other)

    def __imod__(self, other):
        return remainder(self, other, self)

    def __rmod__(self, other):
        return remainder(other, self)

    # NOTE(review): unconventional parameter names (x, y) but positionally
    # equivalent to (self, other); the optional third (modulo) argument of
    # pow() is not supported.
    def __pow__(x, y):
        return power(x, y)

    def __ipow__(self, other):
        return power(self, other, self)

    def __lshift__(self, other):
        return left_shift(self, other)

    def __ilshift__(self, other):
        return left_shift(self, other, self)

    def __rlshift__(self, other):
        return left_shift(other, self)

    def __rshift__(self, other):
        return right_shift(self, other)

    def __irshift__(self, other):
        return right_shift(self, other, self)

    def __rrshift__(self, other):
        return right_shift(other, self)

    def __and__(self, other):
        return bitwise_and(self, other)

    def __iand__(self, other):
        return bitwise_and(self, other, self)

    def __rand__(self, other):
        return bitwise_and(other, self)

    def __or__(self, other):
        return bitwise_or(self, other)

    def __ior__(self, other):
        return bitwise_or(self, other, self)

    def __ror__(self, other):
        return bitwise_or(other, self)

    def __xor__(self, other):
        return bitwise_xor(self, other)

    def __ixor__(self, other):
        return bitwise_xor(self, other, self)

    def __rxor__(self, other):
        return bitwise_xor(other, self)

    def __invert__(self):
        return invert(self)

    # Comparisons return elementwise traced refs, not booleans.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 — presumably fine since refs are never used
    # as dict keys; confirm.
    def __lt__(self, other):
        return less(self, other)

    def __le__(self, other):
        return less_equal(self, other)

    def __eq__(self, other):
        return equal(self, other)

    def __ne__(self, other):
        return not_equal(self, other)

    def __gt__(self, other):
        return greater(self, other)

    def __ge__(self, other):
        return greater_equal(self, other)

    # Truth-testing is ambiguous for a traced array, so both the Py2
    # (__nonzero__) and Py3 (__bool__) hooks refuse it.
    def __nonzero__(self):
        raise Exception("Can't cast to bool")

    def __bool__(self):
        raise Exception("Can't cast to bool")

    def __setitem__(self, slices, value):
        # Only whole-array assignment (ref[...] = x or ref[:] = x) is
        # supported; it is recorded as an elementwise copy.
        if slices is Ellipsis or (isinstance(slices, slice) and
                                  slices == slice(None)):
            copy(value, self)
        else:
            raise ValueError('The fusion supports `[...]` or `[:]`.')

    def copy(self):
        # Delegates to the module-level copy() ufunc wrapper (the method
        # name does not shadow it inside the function body).
        return copy(self)
# Relative "width" ranking of numpy dtype kinds, used by
# _should_use_min_scalar to decide whether numpy's min-scalar casting rule
# applies: bool < unsigned/signed int < float < complex.
_kind_score = {
    'b': 0,
    'u': 1,
    'i': 1,
    'f': 2,
    'c': 3,
}
# numpy dtype -> CUDA C type name, used when emitting kernel source.
_dtype_to_ctype = {
    numpy.dtype('float64'): 'double',
    numpy.dtype('float32'): 'float',
    numpy.dtype('float16'): 'float16',
    numpy.dtype('complex128'): 'complex<double>',
    numpy.dtype('complex64'): 'complex<float>',
    numpy.dtype('int64'): 'long long',
    numpy.dtype('int32'): 'int',
    numpy.dtype('int16'): 'short',
    numpy.dtype('int8'): 'signed char',
    numpy.dtype('uint64'): 'unsigned long long',
    numpy.dtype('uint32'): 'unsigned int',
    numpy.dtype('uint16'): 'unsigned short',
    numpy.dtype('uint8'): 'unsigned char',
    numpy.dtype('bool'): 'bool',
}
# dtypes accepted as fusion inputs: bool, all (u)int widths, float16/32/64.
_dtype_list = [numpy.dtype(_) for _ in '?bhilqBHILQefd']
def _const_to_str(val):
return str(val).lower() if type(val) is bool else str(val)
def _normalize_arg(arg, mem):
    """Coerce *arg* (traced ref, Python scalar, or array) to a _FusionVar.

    Existing refs yield their underlying variable; scalars and arrays of a
    supported dtype are registered as fresh constants in *mem*.
    """
    kind = type(arg)
    if kind is _FusionRef:
        return arg._var
    acceptable = kind in [int, float, bool] or (
        hasattr(arg, 'dtype') and arg.dtype in _dtype_list)
    if acceptable:
        return mem.get_fresh(numpy.dtype(kind), const=arg)
    raise Exception('Unsupported type %s' % kind)
def _convert(f):
    """Wrap a cupy ufunc or ElementwiseKernel as a fusion-traceable callable."""
    kind = type(f)
    if kind is core.ufunc:
        return _convert_from_ufunc(f)
    if kind is core.ElementwiseKernel:
        return _convert_from_elementwise(f)
    raise Exception("Can't convert from %s to FusionOp" % kind)
def _should_use_min_scalar(in_args):
    """Decide whether numpy's min-scalar casting rule should apply.

    True when at least one input is a scalar constant and no array input
    has a narrower kind than the widest scalar.
    """
    max_array_kind = -2
    max_scalar_kind = -1
    for arg in in_args:
        score = _kind_score[arg.ty.kind]
        if arg.const is None:
            # Array-like input: track the widest array kind.
            if score > max_array_kind:
                max_array_kind = score
        elif score > max_scalar_kind:
            # Scalar constant: track the widest scalar kind.
            max_scalar_kind = score
    return max_scalar_kind != -1 and max_array_kind >= max_scalar_kind
def _convert_from_ufunc(ufunc):
    """Return a tracing wrapper for *ufunc*.

    The wrapper mimics the ufunc call convention (positional inputs plus an
    optional ``out=`` keyword) but, instead of computing, selects the first
    type signature in ``ufunc._ops`` the arguments can cast to, allocates
    output variables as needed, records a FusionOp into the shared trace
    memory, and returns _FusionRef(s) for the outputs.
    """
    nin = ufunc.nin
    nout = ufunc.nout

    def get_mem(args):
        # The trace memory is taken from the first _FusionRef argument;
        # at least one argument must already be a traced reference.
        for i in args:
            if type(i) == _FusionRef:
                return i._mem
        raise Exception('number of ndarray arguments must be more than 0')

    def can_cast1(args, ty_ins):
        # Min-scalar casting: constants are checked by value (so e.g. the
        # Python int 3 can feed an int8 signature), arrays by dtype.
        for i in six.moves.range(nin):
            if args[i].const is None:
                if not numpy.can_cast(args[i].ty, ty_ins[i]):
                    return False
            else:
                if not numpy.can_cast(args[i].const, ty_ins[i]):
                    return False
        return True

    def can_cast2(args, ty_ins):
        # Plain dtype-based casting check.
        for i in six.moves.range(nin):
            if not numpy.can_cast(args[i].ty, ty_ins[i]):
                return False
        return True

    def res(*args, **kwargs):
        mem = get_mem(args)
        var_list = [_normalize_arg(_, mem) for _ in args]
        if 'out' in kwargs:
            var_list.append(_normalize_arg(kwargs.pop('out'), mem))
        if kwargs:
            raise TypeError('Wrong arguments %s' % kwargs)
        assert nin <= len(var_list) <= nin + nout
        in_vars = var_list[:nin]
        out_vars = var_list[nin:]
        # Mirror numpy's ufunc dispatch in the choice of casting rule.
        can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
        for ty_ins, ty_outs, op in ufunc._ops:
            ty_ins = [numpy.dtype(_) for _ in ty_ins]
            ty_outs = [numpy.dtype(_) for _ in ty_outs]
            if can_cast(in_vars, ty_ins):
                param_names = (['in%d' % i for i in six.moves.range(nin)] +
                               ['out%d' % i for i in six.moves.range(nout)])
                ret = []
                for i in six.moves.range(nout):
                    if i >= len(out_vars):
                        # No explicit output given: allocate a fresh variable.
                        v = mem.get_fresh(ty_outs[i])
                        out_vars.append(v)
                        ret.append(_FusionRef(v, mem))
                    elif numpy.can_cast(ty_outs[i], out_vars[i].ty,
                                        "same_kind"):
                        # Explicit out= variable can hold the result type.
                        v = out_vars[i]
                        ret.append(_FusionRef(v, mem))
                    else:
                        raise TypeError(
                            'output (typecode \'{}\') could not be coerced '
                            'to provided output parameter (typecode \'{}\') '
                            'according to the casting rule '
                            '"same_kind"'.format(
                                ty_outs[i].char, out_vars[i].ty.char))
                mem.set_op(ufunc.name, op, param_names, nin, nout,
                           in_vars, out_vars, ty_ins + ty_outs)
                return ret[0] if len(ret) == 1 else tuple(ret)
        # No signature matched the argument types.
        raise TypeError('Invalid type cast in \'{}\': {} -> {}'.format(
            ufunc.name,
            [_.ty for _ in in_vars],
            [_.ty for _ in out_vars]))
    return res
def _convert_from_elementwise(elem):
raise Exception('Not Impletmented')
def _gather_submodules(ops):
return {(op.name, tuple(op.types)): op for op in ops}
def _get_params(var_list):
return ['%s v%d' % (var.ty, var.num) for var in var_list]
def _get_out_params(var_list):
return ['%s ret%d' % (var.ty, i) for i, var in enumerate(var_list)]
def _get_declaration_from_var(var):
    """Emit the C declaration of a variable (with initializer if constant)."""
    ctype = _dtype_to_ctype[var.ty]
    if var.const is None:
        return '%s v%d;\n' % (ctype, var.num)
    return 'const %s v%d = %s;\n' % (ctype, var.num, _const_to_str(var.const))
def _get_declaration_from_op(op):
    """Declare the per-op temporaries vN_j, one for each operand type."""
    declarations = ['%s v%d_%d;\n' % (_dtype_to_ctype[t], op.num, j)
                    for j, t in enumerate(op.types)]
    return ''.join(declarations)
def _get_operation_code(op):
    """Emit the C statements that copy inputs into the op's temporaries,
    call its submodule, and copy the outputs back."""
    lines = ['v%d_%d = v%d;\n' % (op.num, i, v.num)
             for i, v in enumerate(op.in_vars)]
    call_args = ', '.join('v%d_%d' % (op.num, i)
                          for i in six.moves.range(op.nin + op.nout))
    lines.append('%s(%s);\n' % (op.name, call_args))
    lines.extend('v%d = v%d_%d;\n' % (v.num, op.num, i + op.nin)
                 for i, v in enumerate(op.out_vars))
    return ''.join(lines)
def _get_submodule_code(op):
    """Render the __device__ helper function implementing one fused op.

    Every operand is passed by reference; inN_type/outN_type typedefs are
    emitted because the ufunc operation bodies refer to them.
    """
    # Pair each parameter name with its C type directly; the previous
    # enumerate() produced an index that was never used.
    parameters = ', '.join('%s &%s' % (_dtype_to_ctype[t], name)
                           for name, t in zip(op.param_names, op.types))
    typedecl = ''.join(('typedef %s in%d_type;\n' % (_dtype_to_ctype[t], i))
                       for i, t in enumerate(op.types[:op.nin]))
    typedecl += ''.join(('typedef %s out%d_type;\n' % (_dtype_to_ctype[t], i))
                        for i, t in enumerate(op.types[op.nin:]))
    module_code = string.Template('''
    __device__ void ${name}(${parameters}) {
      ${typedecl}
      ${operation};
    }
    ''').substitute(
        name=op.name,
        parameters=parameters,
        operation=op.operation,
        typedecl=typedecl)
    return module_code + '\n'
def _get_pre_code(in_vars, out_vars, operation):
    """Render the __device__ _pre_map() applied before a fused reduction.

    It takes the traced input variables, runs *operation*, and returns the
    first output variable — the value fed into the reduction.
    """
    in_params = ', '.join('%s v%s' % (_dtype_to_ctype[v.ty], v.num)
                          for v in in_vars)
    # Outputs become local declarations; out_vars[0] is the return value.
    out_params = ''.join('%s v%s;\n' % (_dtype_to_ctype[v.ty], v.num)
                         for v in out_vars)
    module_code = string.Template('''
    __device__ ${return_type} _pre_map(${in_params}) {
      ${out_params}
      ${operation};
      return ${return_var};
    }
    ''').substitute(
        return_type=_dtype_to_ctype[out_vars[0].ty],
        in_params=in_params,
        out_params=out_params,
        operation=operation,
        return_var='v%d' % out_vars[0].num)
    return module_code
def _get_reduce_op(ops, dtype):
for i in ops._ops:
if numpy.can_cast(dtype.type, i[0][0]):
return i
raise TypeError("Type is mismatched. %s(...), %s" % (ops.name, dtype.type))
def _get_post_code(post_vars, operation, post_out):
    """Render the __device__ _post_map() applied to the reduction result.

    *post_vars* lists the traced variables (v0 is the reduction output fed
    in); *post_out* identifies the variable whose value is returned.
    """
    module_code = string.Template('''
    __device__ ${return_type} _post_map(${arg_type} v0) {
      ${operation};
      return v${return_var};
    }
    ''').substitute(
        arg_type=_dtype_to_ctype[post_vars[0].ty],
        return_type=_dtype_to_ctype[post_vars[post_out.num].ty],
        operation=operation,
        return_var=post_out.num)
    return module_code
def _get_fix_code(data_type, fixed_type, operation):
    """Render the __device__ _post_fix() that converts the reduction output.

    *data_type* is a C type name string; *fixed_type* is a numpy dtype.
    *operation* must assign the converted value to ``out0``.
    """
    module_code = string.Template('''
    __device__ ${fixed_type} _post_fix(${data_type} a) {
      ${fixed_type} out0;
      ${operation};
      return out0;
    }
    ''').substitute(
        data_type=data_type,
        fixed_type=_dtype_to_ctype[fixed_type],
        operation=operation)
    return module_code
def _get_fusion(func, nin, reduce, post_map, identity, input_types, name):
in_vars = [_FusionVar(i, t) for i, t in enumerate(input_types)]
mem = _FusionMem(in_vars)
in_refs = [_FusionRef(_, mem) for _ in in_vars]
out_refs = func(*in_refs)
out_refs = list(out_refs) if type(out_refs) == tuple else [out_refs]
out_refs = [_ for _ in out_refs if _ is not None]
out_refs = | |
{'table': self.config['table'], 'column': column}}))
if schema[column]['nullable'] != self.config['schema'][column]['nullable']:
raise ValueError(text_token({'E05004': {'table': self.config['table'], 'column': column}}))
return columns
def _table_exists(self):
"""Test if the table exists in the database.
Returns
-------
(bool) True if the table exists else False.
"""
backoff_gen = backoff_generator(_INITIAL_DELAY, _BACKOFF_STEPS, _BACKOFF_FUZZ)
while not self._db_exists() and self.config['wait_for_db']:
backoff = next(backoff_gen)
_logger.info(text_token({'I05005': {'dbname': self.config['database']['dbname'], 'backoff': backoff}}))
sleep(backoff)
return self._db_transaction((_TABLE_EXISTS_SQL.format(sql.Literal(self.config['table'])), ))[0].fetchone()[0]
def _add_alignment(self, definition):
"""Add the byte alignment of the column type to the column definition.
Alignment depends on the column type and is an integer number of bytes usually
1, 2, 4 or 8. A value of 0 is used to define a variable alignment field.
Args
----
definition (dict): Column definition as defined by raw_table_column_config_format.json
Returns
-------
(dict): A column definition plus an 'alignment' field.
"""
upper_type = definition['type'].upper()
array_idx = upper_type.find('[')
fixed_length = upper_type.find('[]') == -1
if array_idx != -1:
upper_type = upper_type[:array_idx]
definition['alignment'] = _TYPE_ALIGNMENTS.get(upper_type.strip(), 0) if fixed_length else 0
return definition
    def _order_schema(self):
        """Order table columns to minimise disk footprint.

        A small performance/resource benefit can be gleaned from ordering the
        columns of a table to reduce packing/alignment costs.
        See https://stackoverflow.com/questions/12604744/does-the-order-of-columns-in-a-postgres-table-impact-performance

        Returns
        -------
        (list(tuple(str, dict))): Tuples are (column name, definition) sorted in
            descending alignment requirement i.e. largest to smallest, with
            variable-length (alignment 0) columns last.
        """
        definition_list = [(c, self._add_alignment(d)) for c, d in self.config['schema'].items()]
        # NOTE(review): the sort key concatenates str(alignment) with the column
        # name, so ordering is lexicographic — correct for the single-digit
        # alignments produced (0, 1, 2, 4, 8), but a two-digit alignment such as
        # 16 would sort between '1' and '2'; confirm that is never emitted.
        return sorted(definition_list, key=lambda x: str(x[1]['alignment']) + x[0], reverse=True)
    def _create_table(self):
        """Create the table if it does not exists and the user has privileges to do so.

        Assumption is that other processes may also be trying to create the table and so
        duplicate table (or privilege) exceptions are not considered errors just a race condition
        to wait out. If this process does create the table then it will set the self.creator flag.

        Returns
        -------
        (tuple(str)) Column names.
        """
        # Build per-column SQL fragments in the disk-friendly order chosen by
        # _order_schema(); self._columns records the resulting column order.
        columns, self._columns = [], []
        definition_list = self._order_schema()
        _logger.info("Table will be created with columns in the order logged below.")
        for column, definition in definition_list:
            sql_str = " " + definition['type']
            if not definition['nullable']:
                sql_str += " NOT NULL"
            if definition['primary_key']:
                sql_str += " PRIMARY KEY"
            if definition['unique'] and not definition['primary_key']:
                sql_str += " UNIQUE"
            if 'default' in definition:
                sql_str += " DEFAULT " + definition['default']
            self._columns.append(column)
            _logger.info("Column: {}, SQL Definition: {}, Alignment: {}".format(column, sql_str, definition['alignment']))
            columns.append(sql.Identifier(column) + sql.SQL(sql_str))
        sql_str = _TABLE_CREATE_SQL.format(
            self._table, sql.SQL(", ").join(columns))
        _logger.info(text_token(
            {'I05000': {'sql': self._sql_to_string(sql_str)}}))
        try:
            self._db_transaction((sql_str,), read=False)
        except ProgrammingError as e:
            # Another process won the creation race: fall through to reading
            # the existing table's definition.
            # NOTE(review): e.pgcode is a string error code; if `errors` here is
            # psycopg2.errors then DuplicateTable is an exception *class* and
            # this equality never holds — verify which `errors` is imported.
            if e.pgcode == errors.DuplicateTable:
                _logger.info(text_token({'I05001': {'table': self.config['table'], 'dbname': self.config['database']}}))
                return self._table_definition()
            raise e
        # This process created the table: add indices, mark ourselves as the
        # creator and perform the initial data population.
        self._create_indices()
        self.creator = True
        self._populate_table()
        return self._table_definition()
def _create_indices(self):
"""Create an index for columns that specify one."""
for column, definition in filter(lambda x: 'index' in x[1], self.config['schema'].items()):
sql_str = _TABLE_INDEX_SQL.format(sql.Identifier(column + "_index"), self._table)
sql_str += sql.SQL(" USING ") + sql.Identifier(definition['index'])
sql_str += _TABLE_INDEX_COLUMN_SQL.format(sql.Identifier(column))
_logger.info(text_token({'I05000': {'sql': self._sql_to_string(sql_str)}}))
self._db_transaction((sql_str,), read=False)
def delete_table(self):
"""Delete the table."""
if self._db_exists():
sql_str = _TABLE_DELETE_TABLE_SQL.format(self._table)
_logger.info(text_token({'I05000': {'sql': self._sql_to_string(sql_str)}}))
self._db_transaction((sql_str,), read=False)
def _sql_queries_transaction(self, sql_str_list, repeatable=False):
if _logit():
_logger.debug(text_token({'I05000': {'sql': '\n'.join([self._sql_to_string(s) for s in sql_str_list])}}))
cursors = self._db_transaction(sql_str_list, repeatable)
data = tuple((dbcur.fetchall() for dbcur in cursors))
for cursor in cursors:
cursor.close()
return data
def select(self, query_str='', literals={}, columns='*', repeatable=False):
"""Select columns to return for rows matching query_str.
Args
----
query_str (str): Query SQL: SQL starting 'WHERE ' using '{column/literal}' for identifiers/literals.
e.g. '{column1} = {one} ORDER BY {column1} ASC' where 'column1' is a column name and 'one' is a key
in literals. If literals = {'one': 1}, columns = ('column1', 'column3') and the table name is
'test_table' the example query_str would result in the following SQL:
SELECT "column1", "column3" FROM "test_table" WHERE "column1" = 1 ORDER BY "column1" ASC
literals (dict): Keys are labels used in query_str. Values are literals to replace the labels.
columns (iter(str) or str): The columns to be returned on update if an iterable of str.
If '*' all columns are returned. If another str interpreted as formatted SQL after 'SELECT'
and before 'FROM' as query_str.
repeatable (bool): If True select transaction is done with repeatable read isolation.
Returns
-------
(list(tuple)): An list of the values specified by columns for the specified query_str.
"""
if columns == '*':
columns = self._columns
format_dict = self._format_dict(literals)
if isinstance(columns, str):
columns = sql.SQL(columns).format(**format_dict)
else:
columns = sql.SQL(', ').join(map(sql.Identifier, columns))
sql_str_list = [_TABLE_SELECT_SQL.format(columns, self._table, sql.SQL(query_str).format(**format_dict))]
return self._sql_queries_transaction(sql_str_list, repeatable)[0]
def recursive_select(self, query_str, literals={}, columns='*', repeatable=False):
"""Recursive select of columns to return for rows matching query_str.
Recursion is defined by the ptr_map (pointer map) in the table config.
If the rows in the table define nodes in a graph then the pointer map defines
the edges between nodes.
self.config['ptr_map'] is of the form {
"column X": "column Y",
...
}
where column X contains a reference to a node identified by column Y.
Recursive select will return all the rows defined by the query_str plus the union of any rows
they point to and the rows those rows point to...recursively until no references are left (or
are not in the table).
Args
----
query_str (str): Query SQL: See select() for details.
literals (dict): Keys are labels used in query_str. Values are literals to replace the labels.
columns (iter): The columns to be returned on update. If '*' defined all columns are returned.
repeatable (bool): If True select transaction is done with repeatable read isolation.
Returns
-------
(list(tuple)): An list of the values specified by columns for the specified recursive query_str
and pointer map.
"""
if columns == '*':
columns = self._columns
else:
columns = list(columns)
for ptr in self._pm_columns:
if ptr not in columns:
columns.append(ptr)
t_columns = sql.SQL('t.') + sql.SQL(', t.').join(map(sql.Identifier, columns))
columns = sql.SQL(', ').join(map(sql.Identifier, columns))
format_dict = self._format_dict(literals)
sql_str_list = [_TABLE_RECURSIVE_SELECT.format(columns, self._table, sql.SQL(
query_str).format(**format_dict), t_columns, self._pm_sql)]
return self._sql_queries_transaction(sql_str_list, repeatable)[0]
def _format_dict(self, literals):
"""Create a formatting dict of literals and column identifiers."""
dupes = [literal for literal in filter(lambda x: x in self._columns, literals.keys())]
if dupes:
raise ValueError("Literals cannot have keys that are the names of table columns:{}".format(dupes))
format_dict = {k: sql.Identifier(k) for k in self._columns}
format_dict.update({k: sql.Literal(v) for k, v in literals.items()})
return format_dict
# TODO: This could overflow an SQL statement size limit. In which case
# should we use a COPY https://www.postgresql.org/docs/12/dml-insert.html
def upsert(self, columns, values, update_str=None, literals={}, returning=tuple()):
"""Upsert values.
If update_str is None each entry will be inserted or replace the existing entry on conflict.
In this case literals is not used.
Args
----
columns (iter(str)): Column names for each of the rows in values.
values (iter(tuple/list)): Iterable of rows (ordered iterables) with values in the order as columns.
update_str (str): Update SQL: SQL after 'UPDATE SET ' using '{column/literal}' for identifiers/literals.
e.g. '{column1} = {EXCLUDED.column1} + {one}' where 'column1' is a column name and 'one' is a key
in literals. Prepend 'EXCLUDED.' to read the existing value. If columns = ['column1'] and
values = [(10,)], literals = {'one': 1} and the table name is 'test_table' the example update_str
would result in the following SQL:
INSERT INTO "test_table" "column1" VALUES(10) ON CONFLICT DO
UPDATE SET "column1" = EXCLUDED."column1" + 1
literals (dict): Keys are labels used in update_str. Values are literals to replace the labels.
returning (iter): The columns to be returned on update. If None or empty no columns will be returned.
Returns
-------
(list(tuple)): An list of the values specified by returning for each updated row or [] if returning is
an empty iterable or None.
"""
if not values:
return []
if returning == '*':
returning = self._columns
if update_str is None:
update_str = ",".join((_DEFAULT_UPDATE_STR.format(k) for k in columns if k != self._primary_key))
if update_str != _TABLE_INSERT_CONFLICT_STR:
if self._primary_key is None:
raise ValueError('Can only upsert if a primary key is defined.')
update_str = _TABLE_UPSERT_CONFLICT_STR.format('({' + self._primary_key + '})') + update_str
columns_sql = sql.SQL(",").join([sql.Identifier(k) for k in columns])
values_sql = sql.SQL(",").join((sql.SQL("({0})").format(
sql.SQL(",").join((sql.Literal(value) for value in row))) for row in values))
format_dict = self._format_dict(literals)
format_dict.update({'EXCLUDED.' + k: sql.SQL('EXCLUDED.') + sql.Identifier(k) for k in columns})
update_sql | |
0.9872000217437744,
"checkpoint": 2323
},
{
"value": 0.9904000163078308,
"checkpoint": 3471
},
{
"value": 0.9912999868392944,
"checkpoint": 4606
},
{
"value": 0.9919999837875366,
"checkpoint": 5785
},
{
"value": 0.991599977016449,
"checkpoint": 6956
},
{
"value": 0.992900013923645,
"checkpoint": 8116
},
{
"value": 0.9922000169754028,
"checkpoint": 9262
},
{
"value": 0.9930999875068665,
"checkpoint": 10397
},
{
"value": 0.9926000237464905,
"checkpoint": 11566
},
{
"value": 0.991100013256073,
"checkpoint": 12000
}
]
},
{
"operation": "amax",
"checkpoints": [
{
"value": 0.9804999828338623,
"checkpoint": 1175
},
{
"value": 0.9872000217437744,
"checkpoint": 2323
},
{
"value": 0.9904000163078308,
"checkpoint": 3471
},
{
"value": 0.9912999868392944,
"checkpoint": 4606
},
{
"value": 0.9919999837875366,
"checkpoint": 5785
},
{
"value": 0.991599977016449,
"checkpoint": 6956
},
{
"value": 0.992900013923645,
"checkpoint": 8116
},
{
"value": 0.9922000169754028,
"checkpoint": 9262
},
{
"value": 0.9930999875068665,
"checkpoint": 10397
},
{
"value": 0.9926000237464905,
"checkpoint": 11566
},
{
"value": 0.991100013256073,
"checkpoint": 12000
}
]
},
{
"operation": "median",
"checkpoints": [
{
"value": 0.9804999828338623,
"checkpoint": 1175
},
{
"value": 0.9872000217437744,
"checkpoint": 2323
},
{
"value": 0.9904000163078308,
"checkpoint": 3471
},
{
"value": 0.9912999868392944,
"checkpoint": 4606
},
{
"value": 0.9919999837875366,
"checkpoint": 5785
},
{
"value": 0.991599977016449,
"checkpoint": 6956
},
{
"value": 0.992900013923645,
"checkpoint": 8116
},
{
"value": 0.9922000169754028,
"checkpoint": 9262
},
{
"value": 0.9930999875068665,
"checkpoint": 10397
},
{
"value": 0.9926000237464905,
"checkpoint": 11566
},
{
"value": 0.991100013256073,
"checkpoint": 12000
}
]
},
{
"operation": "std",
"checkpoints": [
{
"value": 0,
"checkpoint": 1175
},
{
"value": 0,
"checkpoint": 2323
},
{
"value": 0,
"checkpoint": 3471
},
{
"value": 0,
"checkpoint": 4606
},
{
"value": 0,
"checkpoint": 5785
},
{
"value": 0,
"checkpoint": 6956
},
{
"value": 0,
"checkpoint": 8116
},
{
"value": 0,
"checkpoint": 9262
},
{
"value": 0,
"checkpoint": 10397
},
{
"value": 0,
"checkpoint": 11566
},
{
"value": 0,
"checkpoint": 12000
}
]
},
{
"operation": "var",
"checkpoints": [
{
"value": 0,
"checkpoint": 1175
},
{
"value": 0,
"checkpoint": 2323
},
{
"value": 0,
"checkpoint": 3471
},
{
"value": 0,
"checkpoint": 4606
},
{
"value": 0,
"checkpoint": 5785
},
{
"value": 0,
"checkpoint": 6956
},
{
"value": 0,
"checkpoint": 8116
},
{
"value": 0,
"checkpoint": 9262
},
{
"value": 0,
"checkpoint": 10397
},
{
"value": 0,
"checkpoint": 11566
},
{
"value": 0,
"checkpoint": 12000
}
]
}
],
"scalar": "accuracy"
}
],
"session": "eval"
}
],
"params": None,
"url": "s3://ps-projects-development/prmr22ve0/ehla1kvbwzaco/model/",
"notes": None,
"isDeleted": False,
"isPublic": False,
"dtCreated": "2019-04-02T17:02:47.157Z",
"dtModified": "2019-04-02T17:02:54.273Z",
"dtUploaded": "2019-04-02T17:02:54.273Z",
"dtDeleted": None,
"modelPath": "/artifacts"
}
],
"total": 1,
"displayTotal": 1
}
LIST_OF_LOGS_FOR_JOB = [
{
"line": 1,
"timestamp": "2019-04-03T15:56:35.457Z",
"message": "Traceback (most recent call last):"
}, {
"line": 2,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " File \"generate_figures.py\", line 15, in <module>"
}, {
"line": 3,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " import dnnlib.tflib as tflib"
}, {
"line": 4,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " File \"/paperspace/dnnlib/tflib/__init__.py\", line 8, in <module>"
}, {
"line": 5,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " from . import autosummary"
}, {
"line": 6,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " File \"/paperspace/dnnlib/tflib/autosummary.py\", line 31, in <module>"
}, {
"line": 7,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " from . import tfutil"
}, {
"line": 8,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " File \"/paperspace/dnnlib/tflib/tfutil.py\", line 34, in <module>"
}, {
"line": 9,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": " def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:"
}, {
"line": 10,
"timestamp": "2019-04-03T15:56:35.458Z",
"message": "AttributeError: module \'tensorflow\' has no attribute \'Dimension\'"
}, {
"line": 11,
"timestamp": "2019-04-03T15:56:46.168Z",
"message": "PSEOF"
}
]
def _make_hyperparameter_experiment(dt_created, template_history_id, handle,
                                    experiment_id, template_dt_created):
    """Build one hyperparameter-experiment entry for the list fixture.

    The four entries of LIST_HYPERPARAMETERS_RESPONSE_JSON differ only in
    their creation timestamps, ids, and handles; every other field is
    identical, so the shared structure lives here instead of being
    repeated four times.

    :param dt_created: value for both "dtCreated" and "dtModified"
        (identical in every original entry)
    :param template_history_id: id used for "experimentTemplateHistoryId"
        and for the nested templateHistory "id"
    :param handle: experiment handle string
    :param experiment_id: numeric experiment "id"
    :param template_dt_created: "dtCreated" of the nested templateHistory
    :return: a fresh dict (each call returns new nested dicts)
    """
    return {
        "dtCreated": dt_created,
        "dtDeleted": None,
        "dtFinished": None,
        "dtModified": dt_created,
        "dtProvisioningFinished": None,
        "dtProvisioningStarted": None,
        "dtStarted": None,
        "dtTeardownFinished": None,
        "dtTeardownStarted": None,
        "experimentError": None,
        "experimentTemplateHistoryId": template_history_id,
        "experimentTemplateId": 8,
        "experimentTypeId": 4,
        "handle": handle,
        "id": experiment_id,
        "projectHandle": "pr4yxj956",
        "projectId": 14898,
        "started_by_user_id": 199654,
        "state": 1,
        "templateHistory": {
            "dtCreated": template_dt_created,
            "dtDeleted": None,
            "experimentTemplateId": 8,
            "id": template_history_id,
            "params": {
                "is_preemptible": False,
                "name": "some_name",
                "ports": 5000,
                "project_handle": "pr4yxj956",
                "tuning_command": "some command",
                "worker_command": "some worker command",
                "worker_container": "some_container",
                "worker_count": 1,
                "worker_machine_type": "k80",
                "worker_use_dockerfile": False,
                "workspaceUrl": "none"
            },
            "triggerEvent": None,
            "triggerEventId": None
        }
    }


# Canned API response used as a test fixture for the "list hyperparameter
# tuning jobs" endpoint: four experiment entries plus paging metadata.
LIST_HYPERPARAMETERS_RESPONSE_JSON = {
    "data": [
        _make_hyperparameter_experiment(
            "2019-05-13T11:29:43.155736+00:00", 45987, "es3dn6fu16r4kk",
            45980, "2019-05-13T11:29:41.933178+00:00",
        ),
        _make_hyperparameter_experiment(
            "2019-05-13T11:29:40.196982+00:00", 45986, "eshlqek7wzvrxa",
            45979, "2019-05-13T11:29:38.882999+00:00",
        ),
        _make_hyperparameter_experiment(
            "2019-05-13T11:25:03.760490+00:00", 45985, "esdwnui5qsk8qm",
            45978, "2019-05-13T11:25:02.489045+00:00",
        ),
        _make_hyperparameter_experiment(
            "2019-05-13T11:23:53.803162+00:00", 45984, "eshz1z9k37w4nm",
            45977, "2019-05-13T11:23:52.778310+00:00",
        ),
    ],
    "message": "success",
    "meta": {
        "filter": [],
        "limit": 11,
        "offset": 0,
        "totalItems": 15
    }
}
# Canned API response fixture for the "get hyperparameter tuning job
# details" endpoint: a single experiment record plus a success message.
HYPERPARAMETERS_DETAILS_RESPONSE_JSON = {
    "data": {
        "dtCreated": "2019-05-13T10:57:32.828135+00:00",
        "dtDeleted": None,
        "dtFinished": None,
        "dtModified": "2019-05-13T10:57:32.828135+00:00",
        "dtProvisioningFinished": None,
        "dtProvisioningStarted": None,
        "dtStarted": None,
        "dtTeardownFinished": None,
        "dtTeardownStarted": None,
        "experimentError": None,
        "experimentTemplateHistoryId": 45973,
        "experimentTemplateId": 8,
        "experimentTypeId": 4,
        "handle": "ess6t3fjs2hb1g",
        "id": 45966,
        "projectHandle": "pr4yxj956",
        "projectId": 14898,
        "started_by_user_id": 199654,
        "state": 1,
        "templateHistory": {
            "dtCreated": "2019-05-13T10:57:31.592657+00:00",
            "dtDeleted": None,
            "experimentTemplateId": 8,
            "id": 45973,
            "params": {
                "is_preemptible": False,
                "name": "some_name",
                "ports": 5000,
                "project_handle": "pr4yxj956",
                "tuning_command": "some command",
                "worker_command": "some worker command",
                "worker_container": "some_container",
                "worker_count": 1,
                "worker_machine_type": "k80",
                "worker_use_dockerfile": False,
                "workspaceUrl": "none"
            },
            "triggerEvent": None,
            "triggerEventId": None
        }
    },
    "message": "success"
}
LIST_OF_LOGS_FOR_EXPERIMENT = [
{
"jobId": "jsy2ibsz1l026y",
"line": 1,
"timestamp": "2019-07-08T12:40:59.139Z",
"message": "2019-07-08 12:40:59.139494: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 2,
"timestamp": "2019-07-08T12:40:59.271Z",
"message": "2019-07-08 12:40:59.270783: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 3,
"timestamp": "2019-07-08T12:40:59.271Z",
"message": "2019-07-08 12:40:59.271379: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x5642a00 executing computations on platform CUDA. Devices:"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 4,
"timestamp": "2019-07-08T12:40:59.271Z",
"message": "2019-07-08 12:40:59.271427: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): Tesla K80, Compute Capability 3.7"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 5,
"timestamp": "2019-07-08T12:40:59.274Z",
"message": "2019-07-08 12:40:59.274589: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200000000 Hz"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 6,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "2019-07-08 12:40:59.274952: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x56aba20 executing computations on platform Host. Devices:"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 7,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "2019-07-08 12:40:59.274983: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): \u003cundefined\u003e, \u003cundefined\u003e"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 8,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "2019-07-08 12:40:59.275358: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: "
},
{
"jobId": "jsy2ibsz1l026y",
"line": 9,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "name: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 10,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "pciBusID: 0000:00:04.0"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 11,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "totalMemory: 11.17GiB freeMemory: 11.09GiB"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 12,
"timestamp": "2019-07-08T12:40:59.275Z",
"message": "2019-07-08 12:40:59.275392: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 13,
"timestamp": "2019-07-08T12:40:59.276Z",
"message": "2019-07-08 12:40:59.276439: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:"
},
{
"jobId": "jsy2ibsz1l026y",
"line": 14,
"timestamp": "2019-07-08T12:40:59.276Z",
"message": "2019-07-08 12:40:59.276473: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 "
},
{
"jobId": "jsy2ibsz1l026y",
"line": 15,
"timestamp": "2019-07-08T12:40:59.276Z",
"message": "2019-07-08 12:40:59.276483: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N "
},
{
"jobId": "jsy2ibsz1l026y",
"line": 16,
"timestamp": "2019-07-08T12:40:59.276Z",
"message": "2019-07-08 12:40:59.276778: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device | |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import tempfile
from collections import Counter
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.predictor.predictor_exporter as pe
import hypothesis.strategies as st
import numpy as np
import torch
import torch.nn.functional as F
from caffe2.python import workspace
from hypothesis import given, settings
from pytext.builtin_task import (
DocumentClassificationTask,
IntentSlotTask,
SeqNNTask,
WordTaggingTask,
)
from pytext.common.constants import DatasetFieldName, SpecialTokens
from pytext.config import config_from_json
from pytext.config.component import create_exporter, create_model
from pytext.data import CommonMetadata
from pytext.data.utils import Vocabulary
from pytext.exporters.exporter import ModelExporter
from pytext.fields import (
CharFeatureField,
DictFeatureField,
FieldMeta,
SeqFeatureField,
TextFeatureField,
)
from pytext.task.new_task import _NewTask
from pytext.utils.onnx import CAFFE2_DB_TYPE
from torchtext.vocab import Vocab
JOINT_CONFIG = """
{
"model": {
"representation": {
"BiLSTMDocSlotAttention": {
"lstm": {
"BiLSTM": {
"lstm_dim": 30,
"num_layers": 1
}
},
"pooling": {
"SelfAttention": {
"attn_dimension": 30,
"dropout": 0.3
}
}
}
},
"decoder": {
"use_doc_probs_in_word": true
},
"output_layer": {
"doc_output": {
"loss": {
"CrossEntropyLoss": {}
}
},
"word_output": {
"CRFOutputLayer": {}
}
}
}
}
"""
DOC_CONFIGS = [
"""
{
"model": {
"representation": {
"DocNNRepresentation": {}
},
"output_layer": {
"loss": {
"CrossEntropyLoss": {}
}
}
},
"features": {
"word_feat": {},
"dict_feat": {},
"char_feat": {
"embed_dim": 5,
"cnn": {
"kernel_num": 2,
"kernel_sizes": [2, 3]
}
},
"dense_feat": {
"dim":10
}
},
"featurizer": {
"SimpleFeaturizer": {}
},
"trainer": {
"epochs": 1
},
"exporter": {}
}
""",
"""
{
"model": {
"representation": {
"BiLSTMDocAttention": {
"pooling": {
"MaxPool": {}
}
}
},
"output_layer": {
"loss": {
"CrossEntropyLoss": {}
}
}
},
"features": {
"dict_feat": {
"embed_dim": 10
}
},
"featurizer": {
"SimpleFeaturizer": {}
},
"trainer": {
"epochs": 1
},
"exporter": {}
}
""",
"""
{
"model": {
"representation": {
"DocNNRepresentation": {}
},
"output_layer": {
"loss": {
"CrossEntropyLoss": {}
}
}
},
"features": {
"word_feat": {},
"dict_feat": {},
"char_feat": {
"embed_dim": 5,
"cnn": {
"kernel_num": 2,
"kernel_sizes": [2, 3]
}
}
},
"featurizer": {
"SimpleFeaturizer": {}
},
"trainer": {
"epochs": 1
},
"exporter": {}
}
""",
]
DOC_CONFIGS_WITH_EXPORT_LOGITS = [
"""
{
"model": {
"representation": {
"BiLSTMDocAttention": {
"pooling": {
"MaxPool": {}
}
}
},
"output_layer": {
"loss": {
"CrossEntropyLoss": {}
}
}
},
"features": {
"dict_feat": {
"embed_dim": 10
}
},
"featurizer": {
"SimpleFeaturizer": {}
},
"trainer": {
"epochs": 1
},
"exporter": {
"export_logits": true
}
}
"""
]
WORD_CONFIGS = [
"""
{
"model": {
"representation": {
"BiLSTMSlotAttention": {
"lstm": {
"lstm_dim": 30,
"num_layers": 2
}
}
},
"output_layer": {
"WordTaggingOutputLayer": {}
}
}
}
""",
"""
{
"model": {
"representation": {
"BiLSTMSlotAttention": {
"lstm": {
"lstm_dim": 30,
"num_layers": 2
}
}
},
"output_layer": {
"CRFOutputLayer": {}
}
}
}
""",
]
SEQ_NN_CONFIG = """
{
"model": {
"representation": {
"doc_representation": {},
"seq_representation": {
"DocNNRepresentation": {}
}
}
}
}
"""
CONTEXTUAL_INTENT_SLOT_CONFIG = """
{
"trainer": {
"epochs": 1
},
"metric_reporter": {
"IntentSlotMetricReporter": {}
},
"model": {
"ContextualIntentSlotModel": {
"inputs": {
"tokens": {
},
"seq_tokens": {}
},
"word_embedding": {
"embed_dim": 10
},
"seq_embedding": {
"embed_dim": 10
}
}
}
}
"""
WORD_VOCAB = [SpecialTokens.UNK, "W1", "W2", "W3", "W4", "W5", "W6", "W7", "W8", "W9"]
W_VOCAB_SIZE = 10
UNK_IDX = 0
PAD_IDX = 1
W_VOCAB = ["<UNK>", "W1", "W2", "W3", "W4", "W5", "W6", "W7", "W8", "W9"]
DICT_VOCAB_SIZE = 10
DICT_VOCAB = ["<UNK>", "D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8", "D9"]
CHAR_VOCAB_SIZE = 10
CHAR_VOCAB = ["<UNK>", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
# For now we need to fix the batch_size for exporting and testing,
# Need to remove this and make it a random input once ONNX is able to
# Handle different batch_sizes
BATCH_SIZE = 1
# Fixed dimension of dense_features since it needs to be specified in config
DENSE_FEATURE_DIM = 10
class ModelExporterTest(hu.HypothesisTestCase):
    @given(
        export_num_words=st.integers(1, 5),
        export_num_dict_feat=st.integers(1, 6),
        num_doc_classes=st.integers(2, 5),
        test_num_words=st.integers(1, 7),
        test_num_dict_feat=st.integers(1, 8),
        num_predictions=st.integers(1, 4),
        test_num_chars=st.integers(1, 7),
    )
    # TODO () Port this test to DocumentClassificationTask
    def DISABLED_test_doc_export_to_caffe2(
        self,
        export_num_words,
        export_num_dict_feat,
        num_doc_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
    ):
        """Disabled: export each DOC_CONFIGS model to a Caffe2 predictor file
        and check the exported net agrees with the PyTorch model.

        For every config, builds the model and its exporter, exports to a
        temp file, then feeds `num_predictions` random inputs through both
        the Caffe2 net and the eval-mode PyTorch model and asserts that the
        log-softmaxed PyTorch outputs match the fetched Caffe2 blobs.
        """
        for config in DOC_CONFIGS:
            config = self._get_config(DocumentClassificationTask.Config, config)
            metadata = self._get_metadata(num_doc_classes, 0)
            py_model = create_model(config.model, config.features, metadata)
            exporter = create_exporter(
                config.exporter, config.features, config.labels, metadata
            )
            # delete=False so the exported file survives the `with` block and
            # can be re-opened by name below.
            with tempfile.NamedTemporaryFile(
                delete=False, suffix=".predictor"
            ) as pred_file:
                print(pred_file.name)
                output_names = exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()
            pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
            for _i in range(num_predictions):
                # Re-load the net from the exported file on every iteration.
                pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
                test_inputs = self._get_rand_input(
                    config.features,
                    BATCH_SIZE,
                    W_VOCAB_SIZE,
                    DICT_VOCAB_SIZE,
                    CHAR_VOCAB_SIZE,
                    test_num_words,
                    test_num_dict_feat,
                    test_num_chars,
                )
                # Feed the same random tensors into the Caffe2 workspace
                # before running the prediction net.
                self._feed_c2_input(
                    workspace,
                    test_inputs,
                    exporter.input_names,
                    metadata.feature_itos_map,
                )
                workspace.RunNetOnce(pred_net)
                c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]
                py_model.eval()
                py_outs = py_model(*test_inputs)
                # Do log_softmax since we do that before exporting predictor nets
                py_outs = F.log_softmax(py_outs, 1)
                np.testing.assert_array_almost_equal(
                    py_outs.view(-1).detach().numpy(), np.array(c2_out).flatten()
                )
    @given(
        num_doc_classes=st.integers(2, 5),
        test_num_words=st.integers(1, 7),
        test_num_dict_feat=st.integers(1, 8),
        num_predictions=st.integers(1, 4),
        test_num_chars=st.integers(1, 7),
    )
    # TODO () Port this test to DocumentClassificationTask
    def DISABLED_test_doc_export_to_caffe2_with_logits(
        self,
        num_doc_classes,
        test_num_words,
        test_num_dict_feat,
        num_predictions,
        test_num_chars,
    ):
        """Disabled: like DISABLED_test_doc_export_to_caffe2, but for configs
        with "export_logits": true.

        The exported net then publishes the raw logits as its last output, so
        the test first compares the raw PyTorch outputs against the final
        Caffe2 blob, then the log-softmaxed outputs against the remaining
        blobs.
        """
        for config in DOC_CONFIGS_WITH_EXPORT_LOGITS:
            config = self._get_config(DocumentClassificationTask.Config, config)
            metadata = self._get_metadata(num_doc_classes, 0)
            py_model = create_model(config.model, config.features, metadata)
            exporter = create_exporter(
                config.exporter, config.features, config.labels, metadata
            )
            # delete=False so the exported file survives the `with` block and
            # can be re-opened by name below.
            with tempfile.NamedTemporaryFile(
                delete=False, suffix=".predictor"
            ) as pred_file:
                print(pred_file.name)
                output_names = exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()
            pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
            for _i in range(num_predictions):
                # Re-load the net from the exported file on every iteration.
                pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
                test_inputs = self._get_rand_input(
                    config.features,
                    BATCH_SIZE,
                    W_VOCAB_SIZE,
                    DICT_VOCAB_SIZE,
                    CHAR_VOCAB_SIZE,
                    test_num_words,
                    test_num_dict_feat,
                    test_num_chars,
                )
                self._feed_c2_input(
                    workspace,
                    test_inputs,
                    exporter.input_names,
                    metadata.feature_itos_map,
                )
                workspace.RunNetOnce(pred_net)
                c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]
                py_model.eval()
                py_outs = py_model(*test_inputs)
                # Raw logits are exported as the last output blob.
                np.testing.assert_array_almost_equal(
                    py_outs.view(-1).detach().numpy(), np.array(c2_out[-1]).flatten()
                )
                # Do log_softmax since we do that before exporting predictor nets
                py_outs = F.log_softmax(py_outs, 1)
                np.testing.assert_array_almost_equal(
                    py_outs.view(-1).detach().numpy(), np.array(c2_out[:-1]).flatten()
                )
@given(
export_num_words=st.integers(1, 5),
num_word_classes=st.integers(2, 5),
test_num_words=st.integers(1, 7),
num_predictions=st.integers(2, 5),
)
@settings(max_examples=10, deadline=None)
def test_wordblstm_export_to_caffe2(
self, export_num_words, num_word_classes, test_num_words, num_predictions
):
for WORD_CONFIG in WORD_CONFIGS:
config = self._get_config(WordTaggingTask.Config, WORD_CONFIG)
tensorizers, data = _NewTask._init_tensorizers(config)
word_labels = [SpecialTokens.PAD, SpecialTokens.UNK, "NoLabel", "person"]
tensorizers["labels"].vocab = Vocabulary(word_labels)
tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
py_model = _NewTask._init_model(config.model, tensorizers)
dummy_test_input = self._get_rand_input_intent_slot(
BATCH_SIZE, W_VOCAB_SIZE, test_num_words
)
exporter = ModelExporter(
ModelExporter.Config(),
py_model.get_export_input_names(tensorizers),
dummy_test_input,
py_model.vocab_to_export(tensorizers),
py_model.get_export_output_names(tensorizers),
)
with tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(".predictor")
) as pred_file:
exporter.export_to_caffe2(py_model, pred_file.name)
workspace.ResetWorkspace()
pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
for _i in range(num_predictions):
test_inputs = self._get_rand_input_intent_slot(
BATCH_SIZE, W_VOCAB_SIZE, test_num_words
)
self._feed_c2_input(
workspace, test_inputs, exporter.input_names, exporter.vocab_map
)
workspace.RunNetOnce(pred_net)
word_output_names = [
"{}:{}".format("word_scores", class_name)
for class_name in word_labels
]
py_model.eval()
py_outs = py_model(*test_inputs)
context = {"seq_lens": test_inputs[-1]}
target = None
pred, score = py_model.get_pred(py_outs, target, context)
c2_word_out = []
for o_name in word_output_names:
c2_word_out.extend(list(workspace.FetchBlob(o_name)))
np.testing.assert_array_almost_equal(
torch.transpose(score, 1, 2).contiguous().view(-1).detach().numpy(),
np.array(c2_word_out).flatten(),
)
def _get_rand_input_intent_slot(
self, batch_size, w_vocab_size, num_words, num_seq=0
):
text = torch.from_numpy(
np.random.randint(w_vocab_size, size=(batch_size, num_words)).astype(
np.int64
)
)
lengths = torch.from_numpy(
np.random.randint(num_words, num_words + 1, size=(batch_size)).astype(
np.int64
)
)
inputs = [text]
if num_seq > 0:
inputs.append(
torch.from_numpy(
np.random.randint(
w_vocab_size, size=(batch_size, num_seq, num_words)
).astype(np.int64)
)
)
inputs.append(lengths)
if num_seq > 0:
inputs.append(
torch.from_numpy(
np.random.randint(num_seq, num_seq + 1, size=(batch_size)).astype(
np.int64
)
)
)
return tuple(inputs)
    @given(
        export_num_words=st.integers(1, 5),
        num_doc_classes=st.integers(2, 5),
        num_word_classes=st.integers(2, 4),
        test_num_words=st.integers(1, 7),
        num_predictions=st.integers(1, 5),
    )
    @settings(max_examples=10, deadline=None)
    def test_joint_export_to_caffe2(
        self,
        export_num_words,
        num_doc_classes,
        num_word_classes,
        test_num_words,
        num_predictions,
    ):
        """Export the joint intent+slot model (JOINT_CONFIG) to Caffe2 and
        check that both the document scores and the word scores of the
        exported net match the PyTorch model's get_pred scores on random
        inputs.
        """
        config = self._get_config(IntentSlotTask.Config, JOINT_CONFIG)
        tensorizers, data = _NewTask._init_tensorizers(config)
        # Pin the label and token vocabularies so the exported blob names
        # and outputs are deterministic.
        doc_labels = [SpecialTokens.UNK, "cu:other", "cu:address_Person"]
        word_labels = [SpecialTokens.PAD, SpecialTokens.UNK, "NoLabel", "person"]
        tensorizers["word_labels"].vocab = Vocabulary(word_labels)
        tensorizers["doc_labels"].vocab = Vocabulary(doc_labels)
        tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
        py_model = _NewTask._init_model(config.model, tensorizers)
        dummy_test_input = self._get_rand_input_intent_slot(
            BATCH_SIZE, W_VOCAB_SIZE, test_num_words
        )
        exporter = ModelExporter(
            ModelExporter.Config(),
            py_model.get_export_input_names(tensorizers),
            dummy_test_input,
            py_model.vocab_to_export(tensorizers),
            py_model.get_export_output_names(tensorizers),
        )
        # delete=False so the exported file can be re-opened by name below.
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=".{}".format(".predictor")
        ) as pred_file:
            exporter.export_to_caffe2(py_model, pred_file.name)
            workspace.ResetWorkspace()
        pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
        for _i in range(num_predictions):
            test_inputs = self._get_rand_input_intent_slot(
                BATCH_SIZE, W_VOCAB_SIZE, test_num_words
            )
            self._feed_c2_input(
                workspace, test_inputs, exporter.input_names, exporter.vocab_map
            )
            workspace.RunNetOnce(pred_net)
            # One Caffe2 blob per label: "doc_scores:<label>" and
            # "word_scores:<label>".
            doc_output_names = [
                "{}:{}".format("doc_scores", class_name) for class_name in doc_labels
            ]
            word_output_names = [
                "{}:{}".format("word_scores", class_name) for class_name in word_labels
            ]
            py_model.eval()
            logits = py_model(*test_inputs)
            # get_pred takes the sequence lengths (last input) via context;
            # no target is supplied.
            context = {"seq_lens": test_inputs[-1]}
            target = None
            (d_pred, w_pred), (d_score, w_score) = py_model.get_pred(
                logits, target, context
            )
            c2_doc_out = []
            for o_name in doc_output_names:
                c2_doc_out.extend(list(workspace.FetchBlob(o_name)))
            np.testing.assert_array_almost_equal(
                d_score.view(-1).detach().numpy(), np.array(c2_doc_out).flatten()
            )
            c2_word_out = []
            for o_name in word_output_names:
                c2_word_out.extend(list(workspace.FetchBlob(o_name)))
            # Dims 1 and 2 of the torch word scores are swapped to line up
            # with the concatenated per-label blobs before flattening.
            np.testing.assert_array_almost_equal(
                torch.transpose(w_score, 1, 2).contiguous().view(-1).detach().numpy(),
                np.array(c2_word_out).flatten(),
            )
@given(
export_num_words=st.integers(1, 5),
num_doc_classes=st.integers(2, 5),
test_num_words=st.integers(1, 7),
num_predictions=st.integers(1, 5),
test_num_seq=st.integers(1, 7),
)
@settings(max_examples=10, deadline=None)
def test_seq_nn_export_to_caffe2(
self,
export_num_words,
num_doc_classes,
test_num_words,
num_predictions,
test_num_seq,
):
config = self._get_config(SeqNNTask.Config, SEQ_NN_CONFIG)
tensorizers, data = _NewTask._init_tensorizers(config)
doc_labels = [SpecialTokens.UNK, "cu:other", "cu:address_Person"]
tensorizers["labels"].vocab = Vocabulary(doc_labels)
tensorizers["tokens"].vocab = Vocabulary(WORD_VOCAB)
py_model = _NewTask._init_model(config.model, tensorizers)
dummy_test_input = self._get_seq_nn_rand_input(
BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
)
exporter = ModelExporter(
ModelExporter.Config(),
py_model.get_export_input_names(tensorizers),
dummy_test_input,
py_model.vocab_to_export(tensorizers),
py_model.get_export_output_names(tensorizers),
)
with tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(".predictor")
) as pred_file:
output_names = exporter.export_to_caffe2(py_model, pred_file.name)
workspace.ResetWorkspace()
pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)
for _i in range(num_predictions):
test_inputs = self._get_seq_nn_rand_input(
BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq
)
self._feed_c2_input(
workspace, test_inputs, exporter.input_names, exporter.vocab_map
)
workspace.RunNetOnce(pred_net)
c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]
py_model.eval()
py_outs = py_model(*test_inputs)
# Do log_softmax since we | |
+ m.x1881 <= 9)
m.c5022 = Constraint(expr= 6*m.b201 + m.x1882 <= 6)
m.c5023 = Constraint(expr= 6*m.b202 + m.x1883 <= 6)
m.c5024 = Constraint(expr= 6*m.b203 + m.x1884 <= 6)
m.c5025 = Constraint(expr= 6*m.b204 + m.x1885 <= 6)
m.c5026 = Constraint(expr= 6*m.b205 + m.x1886 <= 6)
m.c5027 = Constraint(expr= 6*m.b206 + m.x1887 <= 6)
m.c5028 = Constraint(expr= 6*m.b207 + m.x1888 <= 6)
m.c5029 = Constraint(expr= 6*m.b208 + m.x1889 <= 6)
m.c5030 = Constraint(expr= 6*m.b209 + m.x1890 <= 6)
m.c5031 = Constraint(expr= 6*m.b210 + m.x1891 <= 6)
m.c5032 = Constraint(expr= 6*m.b211 + m.x1892 <= 6)
m.c5033 = Constraint(expr= 6*m.b212 + m.x1893 <= 6)
m.c5034 = Constraint(expr= 6*m.b213 + m.x1894 <= 6)
m.c5035 = Constraint(expr= 6*m.b214 + m.x1895 <= 6)
m.c5036 = Constraint(expr= 6*m.b215 + m.x1896 <= 6)
m.c5037 = Constraint(expr= 6*m.b216 + m.x1897 <= 6)
m.c5038 = Constraint(expr= 6*m.b217 + m.x1898 <= 6)
m.c5039 = Constraint(expr= 6*m.b218 + m.x1899 <= 6)
m.c5040 = Constraint(expr= 6*m.b219 + m.x1900 <= 6)
m.c5041 = Constraint(expr= 6*m.b220 + m.x1901 <= 6)
m.c5042 = Constraint(expr= 8*m.b221 + m.x1902 <= 8)
m.c5043 = Constraint(expr= 8*m.b222 + m.x1903 <= 8)
m.c5044 = Constraint(expr= 8*m.b223 + m.x1904 <= 8)
m.c5045 = Constraint(expr= 8*m.b224 + m.x1905 <= 8)
m.c5046 = Constraint(expr= 8*m.b225 + m.x1906 <= 8)
m.c5047 = Constraint(expr= 8*m.b226 + m.x1907 <= 8)
m.c5048 = Constraint(expr= 8*m.b227 + m.x1908 <= 8)
m.c5049 = Constraint(expr= 8*m.b228 + m.x1909 <= 8)
m.c5050 = Constraint(expr= 8*m.b229 + m.x1910 <= 8)
m.c5051 = Constraint(expr= 8*m.b230 + m.x1911 <= 8)
m.c5052 = Constraint(expr= 8*m.b231 + m.x1912 <= 8)
m.c5053 = Constraint(expr= 8*m.b232 + m.x1913 <= 8)
m.c5054 = Constraint(expr= 8*m.b233 + m.x1914 <= 8)
m.c5055 = Constraint(expr= 8*m.b234 + m.x1915 <= 8)
m.c5056 = Constraint(expr= 8*m.b235 + m.x1916 <= 8)
m.c5057 = Constraint(expr= 8*m.b236 + m.x1917 <= 8)
m.c5058 = Constraint(expr= 8*m.b237 + m.x1918 <= 8)
m.c5059 = Constraint(expr= 8*m.b238 + m.x1919 <= 8)
m.c5060 = Constraint(expr= 8*m.b239 + m.x1920 <= 8)
m.c5061 = Constraint(expr= 8*m.b240 + m.x1921 <= 8)
m.c5062 = Constraint(expr= 9*m.b241 + m.x1922 <= 9)
m.c5063 = Constraint(expr= 9*m.b242 + m.x1923 <= 9)
m.c5064 = Constraint(expr= 9*m.b243 + m.x1924 <= 9)
m.c5065 = Constraint(expr= 9*m.b244 + m.x1925 <= 9)
m.c5066 = Constraint(expr= 9*m.b245 + m.x1926 <= 9)
m.c5067 = Constraint(expr= 9*m.b246 + m.x1927 <= 9)
m.c5068 = Constraint(expr= 9*m.b247 + m.x1928 <= 9)
m.c5069 = Constraint(expr= 9*m.b248 + m.x1929 <= 9)
m.c5070 = Constraint(expr= 9*m.b249 + m.x1930 <= 9)
m.c5071 = Constraint(expr= 9*m.b250 + m.x1931 <= 9)
m.c5072 = Constraint(expr= 9*m.b251 + m.x1932 <= 9)
m.c5073 = Constraint(expr= 9*m.b252 + m.x1933 <= 9)
m.c5074 = Constraint(expr= 9*m.b253 + m.x1934 <= 9)
m.c5075 = Constraint(expr= 9*m.b254 + m.x1935 <= 9)
m.c5076 = Constraint(expr= 9*m.b255 + m.x1936 <= 9)
m.c5077 = Constraint(expr= 9*m.b256 + m.x1937 <= 9)
m.c5078 = Constraint(expr= 9*m.b257 + m.x1938 <= 9)
m.c5079 = Constraint(expr= 9*m.b258 + m.x1939 <= 9)
m.c5080 = Constraint(expr= 9*m.b259 + m.x1940 <= 9)
m.c5081 = Constraint(expr= 9*m.b260 + m.x1941 <= 9)
m.c5082 = Constraint(expr= 8*m.b261 + m.x1942 <= 8)
m.c5083 = Constraint(expr= 8*m.b262 + m.x1943 <= 8)
m.c5084 = Constraint(expr= 8*m.b263 + m.x1944 <= 8)
m.c5085 = Constraint(expr= 8*m.b264 + m.x1945 <= 8)
m.c5086 = Constraint(expr= 8*m.b265 + m.x1946 <= 8)
m.c5087 = Constraint(expr= 8*m.b266 + m.x1947 <= 8)
m.c5088 = Constraint(expr= 8*m.b267 + m.x1948 <= 8)
m.c5089 = Constraint(expr= 8*m.b268 + m.x1949 <= 8)
m.c5090 = Constraint(expr= 8*m.b269 + m.x1950 <= 8)
m.c5091 = Constraint(expr= 8*m.b270 + m.x1951 <= 8)
m.c5092 = Constraint(expr= 8*m.b271 + m.x1952 <= 8)
m.c5093 = Constraint(expr= 8*m.b272 + m.x1953 <= 8)
m.c5094 = Constraint(expr= 8*m.b273 + m.x1954 <= 8)
m.c5095 = Constraint(expr= 8*m.b274 + m.x1955 <= 8)
m.c5096 = Constraint(expr= 8*m.b275 + m.x1956 <= 8)
m.c5097 = Constraint(expr= 8*m.b276 + m.x1957 <= 8)
m.c5098 = Constraint(expr= 8*m.b277 + m.x1958 <= 8)
m.c5099 = Constraint(expr= 8*m.b278 + m.x1959 <= 8)
m.c5100 = Constraint(expr= 8*m.b279 + m.x1960 <= 8)
m.c5101 = Constraint(expr= 8*m.b280 + m.x1961 <= 8)
m.c5102 = Constraint(expr= 8*m.b281 + m.x1962 <= 8)
m.c5103 = Constraint(expr= 8*m.b282 + m.x1963 <= 8)
m.c5104 = Constraint(expr= 8*m.b283 + m.x1964 <= 8)
m.c5105 = Constraint(expr= 8*m.b284 + m.x1965 <= 8)
m.c5106 = Constraint(expr= 8*m.b285 + m.x1966 <= 8)
m.c5107 = Constraint(expr= 8*m.b286 + m.x1967 <= 8)
m.c5108 = Constraint(expr= 8*m.b287 + m.x1968 <= 8)
m.c5109 = Constraint(expr= 8*m.b288 + m.x1969 <= 8)
m.c5110 = Constraint(expr= 8*m.b289 + m.x1970 <= 8)
m.c5111 = Constraint(expr= 8*m.b290 + m.x1971 <= 8)
m.c5112 = Constraint(expr= 8*m.b291 + m.x1972 <= 8)
m.c5113 = Constraint(expr= 8*m.b292 + m.x1973 <= 8)
m.c5114 = Constraint(expr= 8*m.b293 + m.x1974 <= 8)
m.c5115 = Constraint(expr= 8*m.b294 + m.x1975 <= 8)
m.c5116 = Constraint(expr= 8*m.b295 + m.x1976 <= 8)
m.c5117 = Constraint(expr= 8*m.b296 + m.x1977 <= 8)
m.c5118 = Constraint(expr= 8*m.b297 + m.x1978 <= 8)
m.c5119 = Constraint(expr= 8*m.b298 + m.x1979 <= 8)
m.c5120 = Constraint(expr= 8*m.b299 + m.x1980 <= 8)
m.c5121 = Constraint(expr= 8*m.b300 + m.x1981 <= 8)
m.c5122 = Constraint(expr= 4*m.b301 + m.x1982 <= 4)
m.c5123 = Constraint(expr= 4*m.b302 + m.x1983 <= 4)
m.c5124 = Constraint(expr= 4*m.b303 + m.x1984 <= 4)
m.c5125 = Constraint(expr= 4*m.b304 + m.x1985 <= 4)
m.c5126 = Constraint(expr= 4*m.b305 + m.x1986 <= 4)
m.c5127 = Constraint(expr= 4*m.b306 + m.x1987 <= 4)
m.c5128 = Constraint(expr= 4*m.b307 + m.x1988 <= 4)
m.c5129 = Constraint(expr= 4*m.b308 + m.x1989 <= 4)
m.c5130 = Constraint(expr= 4*m.b309 + m.x1990 <= 4)
m.c5131 = Constraint(expr= 4*m.b310 + m.x1991 <= 4)
m.c5132 = Constraint(expr= 4*m.b311 + m.x1992 <= 4)
m.c5133 = Constraint(expr= 4*m.b312 + m.x1993 <= 4)
m.c5134 = Constraint(expr= 4*m.b313 + m.x1994 <= 4)
m.c5135 = Constraint(expr= 4*m.b314 + m.x1995 <= 4)
m.c5136 = Constraint(expr= 4*m.b315 + m.x1996 <= 4)
m.c5137 = Constraint(expr= 4*m.b316 + m.x1997 <= 4)
m.c5138 = Constraint(expr= 4*m.b317 + m.x1998 <= 4)
m.c5139 = Constraint(expr= 4*m.b318 + m.x1999 <= 4)
m.c5140 = Constraint(expr= 4*m.b319 + m.x2000 <= 4)
m.c5141 = Constraint(expr= 4*m.b320 + m.x2001 <= 4)
m.c5142 = Constraint(expr= 7*m.b321 + m.x2002 <= 7)
m.c5143 = Constraint(expr= 7*m.b322 + m.x2003 <= 7)
m.c5144 = Constraint(expr= 7*m.b323 + m.x2004 <= 7)
m.c5145 = Constraint(expr= 7*m.b324 + m.x2005 <= 7)
m.c5146 = Constraint(expr= 7*m.b325 + m.x2006 <= 7)
m.c5147 = Constraint(expr= 7*m.b326 + m.x2007 <= 7)
m.c5148 = Constraint(expr= 7*m.b327 + m.x2008 <= 7)
m.c5149 = Constraint(expr= 7*m.b328 + m.x2009 <= 7)
m.c5150 = Constraint(expr= 7*m.b329 + m.x2010 <= 7)
m.c5151 = Constraint(expr= 7*m.b330 + m.x2011 <= 7)
m.c5152 = Constraint(expr= 7*m.b331 + m.x2012 <= 7)
m.c5153 = Constraint(expr= 7*m.b332 + m.x2013 <= 7)
m.c5154 = Constraint(expr= 7*m.b333 + m.x2014 <= 7)
m.c5155 = Constraint(expr= 7*m.b334 + m.x2015 <= 7)
m.c5156 = Constraint(expr= 7*m.b335 + m.x2016 <= 7)
m.c5157 = Constraint(expr= 7*m.b336 + m.x2017 <= 7)
m.c5158 = Constraint(expr= 7*m.b337 + m.x2018 <= 7)
m.c5159 = Constraint(expr= 7*m.b338 + m.x2019 <= 7)
m.c5160 = Constraint(expr= 7*m.b339 + m.x2020 <= 7)
m.c5161 = Constraint(expr= 7*m.b340 + m.x2021 <= 7)
m.c5162 = Constraint(expr= 8*m.b341 + m.x2022 <= 8)
m.c5163 = Constraint(expr= 8*m.b342 + m.x2023 <= 8)
m.c5164 = Constraint(expr= 8*m.b343 + m.x2024 <= 8)
m.c5165 = Constraint(expr= 8*m.b344 + m.x2025 <= 8)
m.c5166 = Constraint(expr= 8*m.b345 + m.x2026 <= 8)
m.c5167 = Constraint(expr= 8*m.b346 + m.x2027 <= 8)
m.c5168 = Constraint(expr= 8*m.b347 + m.x2028 <= 8)
m.c5169 = Constraint(expr= 8*m.b348 + m.x2029 <= 8)
m.c5170 = Constraint(expr= 8*m.b349 + m.x2030 <= 8)
m.c5171 = Constraint(expr= 8*m.b350 + m.x2031 <= 8)
m.c5172 = Constraint(expr= 8*m.b351 + m.x2032 <= 8)
m.c5173 = Constraint(expr= 8*m.b352 + m.x2033 <= 8)
m.c5174 = Constraint(expr= 8*m.b353 + m.x2034 <= 8)
m.c5175 = Constraint(expr= 8*m.b354 + m.x2035 <= 8)
m.c5176 = Constraint(expr= 8*m.b355 + m.x2036 <= 8)
m.c5177 = Constraint(expr= 8*m.b356 + m.x2037 <= 8)
m.c5178 = Constraint(expr= 8*m.b357 + m.x2038 <= 8)
m.c5179 = Constraint(expr= 8*m.b358 + m.x2039 <= 8)
m.c5180 = Constraint(expr= 8*m.b359 + m.x2040 <= 8)
m.c5181 = Constraint(expr= 8*m.b360 + m.x2041 <= 8)
m.c5182 = Constraint(expr= 7*m.b361 + m.x2042 <= 7)
m.c5183 = Constraint(expr= 7*m.b362 + m.x2043 <= 7)
m.c5184 = Constraint(expr= 7*m.b363 + m.x2044 <= 7)
m.c5185 = Constraint(expr= 7*m.b364 + m.x2045 <= 7)
m.c5186 = Constraint(expr= 7*m.b365 + m.x2046 <= 7)
m.c5187 = Constraint(expr= 7*m.b366 + m.x2047 <= 7)
m.c5188 = Constraint(expr= 7*m.b367 + m.x2048 <= 7)
m.c5189 = Constraint(expr= 7*m.b368 + m.x2049 <= 7)
m.c5190 = Constraint(expr= 7*m.b369 + m.x2050 <= 7)
m.c5191 = Constraint(expr= 7*m.b370 + m.x2051 <= 7)
m.c5192 = Constraint(expr= 7*m.b371 + m.x2052 <= 7)
m.c5193 = Constraint(expr= 7*m.b372 + m.x2053 <= 7)
m.c5194 = Constraint(expr= 7*m.b373 + m.x2054 <= 7)
m.c5195 = Constraint(expr= 7*m.b374 + m.x2055 <= 7)
m.c5196 = Constraint(expr= 7*m.b375 + m.x2056 <= 7)
m.c5197 = Constraint(expr= 7*m.b376 + m.x2057 <= 7)
m.c5198 = Constraint(expr= 7*m.b377 + m.x2058 <= 7)
m.c5199 = Constraint(expr= 7*m.b378 + m.x2059 <= 7)
m.c5200 = Constraint(expr= 7*m.b379 + m.x2060 <= 7)
m.c5201 = Constraint(expr= 7*m.b380 + m.x2061 <= 7)
m.c5202 = Constraint(expr= 7*m.b381 + m.x2062 <= 7)
m.c5203 = Constraint(expr= 7*m.b382 + m.x2063 <= 7)
m.c5204 = Constraint(expr= | |
实例名称,目前只有Ckafaka会用到
:type InstanceName: str
:param ErrMsg: 错误消息
:type ErrMsg: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Endpoint = None
self.QueueName = None
self.ProductID = None
self.MsgType = None
self.Result = None
self.RoleName = None
self.RoleID = None
self.QueueRegion = None
self.QueueType = None
self.InstanceId = None
self.InstanceName = None
self.ErrMsg = None
self.RequestId = None
def _deserialize(self, params):
self.Endpoint = params.get("Endpoint")
self.QueueName = params.get("QueueName")
self.ProductID = params.get("ProductID")
self.MsgType = params.get("MsgType")
self.Result = params.get("Result")
self.RoleName = params.get("RoleName")
self.RoleID = params.get("RoleID")
self.QueueRegion = params.get("QueueRegion")
self.QueueType = params.get("QueueType")
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.ErrMsg = params.get("ErrMsg")
self.RequestId = params.get("RequestId")
class CreateProductRequest(AbstractModel):
    """CreateProduct request structure."""

    def __init__(self):
        r"""
        :param ProductName: Product name
        :type ProductName: str
        :param DeviceType: Product device type: 1 - common device; 2 - NVR device
        :type DeviceType: int
        :param ProductVaildYears: Product validity period, in years
        :type ProductVaildYears: int
        :param Features: Device feature codes: ypsxth (two-way audio), spdxth (one-way video talk)
        :type Features: list of str
        :param ChipOs: Device operating system; use "default" for common devices
        :type ChipOs: str
        :param ChipManufactureId: Chip manufacturer id; use "default" for common devices
        :type ChipManufactureId: str
        :param ChipId: Chip id; use "default" for common devices
        :type ChipId: str
        :param ProductDescription: Product description
        :type ProductDescription: str
        :param EncryptionType: Authentication method; only 2 (PSK authentication) is supported
        :type EncryptionType: int
        :param NetType: Connection type: "wifi" for WiFi, "cellular" for 4G
        :type NetType: str
        """
        self.ProductName = None
        self.DeviceType = None
        self.ProductVaildYears = None
        self.Features = None
        self.ChipOs = None
        self.ChipManufactureId = None
        self.ChipId = None
        self.ProductDescription = None
        self.EncryptionType = None
        self.NetType = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductName = params.get("ProductName")
        self.DeviceType = params.get("DeviceType")
        self.ProductVaildYears = params.get("ProductVaildYears")
        self.Features = params.get("Features")
        self.ChipOs = params.get("ChipOs")
        self.ChipManufactureId = params.get("ChipManufactureId")
        self.ChipId = params.get("ChipId")
        self.ProductDescription = params.get("ProductDescription")
        self.EncryptionType = params.get("EncryptionType")
        self.NetType = params.get("NetType")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class CreateProductResponse(AbstractModel):
    """CreateProduct response structure."""

    def __init__(self):
        r"""
        :param Data: Product details
        :type Data: :class:`tencentcloud.iotvideo.v20201215.models.VideoProduct`
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        raw_data = params.get("Data")
        if raw_data is not None:
            product = VideoProduct()
            product._deserialize(raw_data)
            self.Data = product
        self.RequestId = params.get("RequestId")
class CreateTaskFileUrlRequest(AbstractModel):
    """CreateTaskFileUrl request structure."""

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        """
        self.ProductId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductId = params.get("ProductId")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class CreateTaskFileUrlResponse(AbstractModel):
    """CreateTaskFileUrl response structure."""

    def __init__(self):
        r"""
        :param Url: Upload URL for the task file
        :type Url: str
        :param FileName: Task file name
        :type FileName: str
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.Url = None
        self.FileName = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("Url", "FileName", "RequestId"):
            setattr(self, field, params.get(field))
class DataForward(AbstractModel):
    """Data forwarding description."""

    def __init__(self):
        r"""
        :param ProductId: Product ID.
        :type ProductId: str
        :param ForwardAddr: Forwarding address.
        :type ForwardAddr: str
        :param Status: Forwarding status.
        :type Status: int
        :param CreateTime: Creation time.
        :type CreateTime: int
        :param UpdateTime: Update time.
        :type UpdateTime: int
        :param DataChose: 1 - data forwarding; 2 - device online/offline status forwarding; 3 - both.
        Note: this field may return null, indicating that no valid value was found.
        :type DataChose: int
        """
        self.ProductId = None
        self.ForwardAddr = None
        self.Status = None
        self.CreateTime = None
        self.UpdateTime = None
        self.DataChose = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductId = params.get("ProductId")
        self.ForwardAddr = params.get("ForwardAddr")
        self.Status = params.get("Status")
        self.CreateTime = params.get("CreateTime")
        self.UpdateTime = params.get("UpdateTime")
        self.DataChose = params.get("DataChose")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DeleteDeviceRequest(AbstractModel):
    """DeleteDevice request structure."""

    def __init__(self):
        r"""
        :param ProductId: Product ID.
        :type ProductId: str
        :param DeviceName: Device name.
        :type DeviceName: str
        """
        self.ProductId = None
        self.DeviceName = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductId = params.get("ProductId")
        self.DeviceName = params.get("DeviceName")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DeleteDeviceResponse(AbstractModel):
    """DeleteDevice response structure."""

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteFirmwareRequest(AbstractModel):
    """DeleteFirmware request structure."""

    def __init__(self):
        r"""
        :param ProductID: Product ID
        :type ProductID: str
        :param FirmwareVersion: Firmware version
        :type FirmwareVersion: str
        """
        self.ProductID = None
        self.FirmwareVersion = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductID = params.get("ProductID")
        self.FirmwareVersion = params.get("FirmwareVersion")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DeleteFirmwareResponse(AbstractModel):
    """DeleteFirmware response structure."""

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteForwardRuleRequest(AbstractModel):
    """DeleteForwardRule request structure."""

    def __init__(self):
        r"""
        :param ProductID: Product ID
        :type ProductID: str
        :param Skey: Console Skey
        :type Skey: str
        :param QueueType: Queue type
        :type QueueType: int
        :param QueueName: Queue name
        :type QueueName: str
        """
        self.ProductID = None
        self.Skey = None
        self.QueueType = None
        self.QueueName = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductID = params.get("ProductID")
        self.Skey = params.get("Skey")
        self.QueueType = params.get("QueueType")
        self.QueueName = params.get("QueueName")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DeleteForwardRuleResponse(AbstractModel):
    """DeleteForwardRule response structure."""

    def __init__(self):
        r"""
        :param Endpoint: Tencent Cloud account
        :type Endpoint: str
        :param QueueName: Queue name
        :type QueueName: str
        :param ProductID: Product ID
        :type ProductID: str
        :param Result: Deletion result: 0 - success; any other value - failure
        :type Result: int
        :param ErrMsg: Error message
        :type ErrMsg: str
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.Endpoint = None
        self.QueueName = None
        self.ProductID = None
        self.Result = None
        self.ErrMsg = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("Endpoint", "QueueName", "ProductID",
                      "Result", "ErrMsg", "RequestId"):
            setattr(self, field, params.get(field))
class DeleteProductRequest(AbstractModel):
    """DeleteProduct request structure."""

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        """
        self.ProductId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ProductId = params.get("ProductId")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DeleteProductResponse(AbstractModel):
    """DeleteProduct response structure."""

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DescribeAIModelApplicationsRequest(AbstractModel):
    """DescribeAIModelApplications request structure."""

    def __init__(self):
        r"""
        :param ModelId: Model ID
        :type ModelId: str
        :param Limit: Page size, up to 100
        :type Limit: int
        :param Offset: Offset, starting from 0
        :type Offset: int
        :param ProductId: Product ID
        :type ProductId: str
        """
        self.ModelId = None
        self.Limit = None
        self.Offset = None
        self.ProductId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ModelId = params.get("ModelId")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.ProductId = params.get("ProductId")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DescribeAIModelApplicationsResponse(AbstractModel):
    """DescribeAIModelApplications response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Number of application records
        :type TotalCount: int
        :param Applications: Array of application records
        :type Applications: list of AIModelApplication
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.TotalCount = None
        self.Applications = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        self.TotalCount = params.get("TotalCount")
        raw_apps = params.get("Applications")
        if raw_apps is not None:
            self.Applications = []
            for item in raw_apps:
                application = AIModelApplication()
                application._deserialize(item)
                self.Applications.append(application)
        self.RequestId = params.get("RequestId")
class DescribeAIModelChannelRequest(AbstractModel):
    """DescribeAIModelChannel request structure."""

    def __init__(self):
        r"""
        :param ModelId: Model ID
        :type ModelId: str
        :param ProductId: Product ID
        :type ProductId: str
        """
        self.ModelId = None
        self.ProductId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict, warning about unknown keys."""
        self.ModelId = params.get("ModelId")
        self.ProductId = params.get("ProductId")
        # Fix: "memeber_set"/"fileds" typos; set difference replaces the manual loop.
        unknown_keys = set(params.keys()) - set(vars(self).keys())
        if unknown_keys:
            warnings.warn("%s fields are useless." % ",".join(unknown_keys))
class DescribeAIModelChannelResponse(AbstractModel):
    """DescribeAIModelChannel response structure."""

    def __init__(self):
        r"""
        :param Type: Push type: "ckafka" - message queue; "forward" - http/https push
        :type Type: str
        :param ForwardAddress: Third-party push address
        Note: this field may return null, indicating that no valid value was found.
        :type ForwardAddress: str
        :param ForwardKey: Third-party push key
        Note: this field may return null, indicating that no valid value was found.
        :type ForwardKey: str
        :param CKafkaRegion: CKafka region
        Note: this field may return null, indicating that no valid value was found.
        :type CKafkaRegion: str
        :param CKafkaInstance: CKafka instance
        Note: this field may return null, indicating that no valid value was found.
        :type CKafkaInstance: str
        :param CKafkaTopic: CKafka subscription topic
        Note: this field may return null, indicating that no valid value was found.
        :type CKafkaTopic: str
        :param RequestId: The unique request ID, returned for every request; required when locating a problem
        :type RequestId: str
        """
        self.Type = None
        self.ForwardAddress = None
        self.ForwardKey = None
        self.CKafkaRegion = None
        self.CKafkaInstance = None
        self.CKafkaTopic = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the raw API dict."""
        for field in ("Type", "ForwardAddress", "ForwardKey", "CKafkaRegion",
                      "CKafkaInstance", "CKafkaTopic", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeAIModelUsageRequest(AbstractModel):
"""DescribeAIModelUsage请求参数结构体
"""
def __init__(self):
r"""
:param ModelId: 模型ID
:type ModelId: str
:param ProductId: 产品ID
:type ProductId: str
:param Offset: 偏移量,从0开始
:type Offset: int
:param Limit: 分页的大小,最大100
:type Limit: int
"""
self.ModelId = None
self.ProductId = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.ModelId = params.get("ModelId")
self.ProductId = params.get("ProductId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
| |
import sys
import os
import yaml
import argparse
import numpy as np
import pandas as pd
import csv
import random
import stat
import glob
import subprocess
from statistics import mean
from pprint import pprint, pformat
import geopandas
from shapely.geometry import Point
from math import sin, cos, atan2, sqrt, pi
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.algorithms.moo.moead import MOEAD, ParallelMOEAD
from pymoo.factory import get_sampling, get_crossover, get_mutation, \
get_problem, get_reference_directions
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
from pymoo.core.problem import Problem
from pymoo.factory import get_performance_indicator
from moo_algs.bce_moead import BCEMOEAD
import time
from datetime import timedelta
# Directory containing this script; used as the default simulation work dir.
work_dir = os.path.dirname(os.path.abspath(__file__))
# Path of the execution log written by MOO_log; expected to be assigned by
# the entry point before MOO_log is first called -- TODO confirm.
EXEC_LOG_FILE = None
# When True, simulations are submitted through the QCG PilotJob manager.
USE_PJ = False
# QCG PilotJob manager handle used by run_simulation_with_PJ; presumably
# created by the entry point when USE_PJ is True -- TODO confirm.
QCG_MANAGER = None
class dict_to_obj:
    """Recursively expose a dict's keys as object attributes.

    Dict values become nested ``dict_to_obj`` instances; list/tuple values
    become lists whose dict elements are wrapped (non-dict elements are
    kept unchanged, and nested sequences are not recursed into).
    """

    def __init__(self, in_dict: dict):
        assert isinstance(in_dict, dict)

        def wrap(value):
            # Only dicts are converted; every other value passes through.
            return dict_to_obj(value) if isinstance(value, dict) else value

        for key, val in in_dict.items():
            if isinstance(val, (list, tuple)):
                setattr(self, key, [wrap(item) for item in val])
            else:
                setattr(self, key, wrap(val))
def MOO_log(msg):
    """Append *msg* as one line to the shared execution log file."""
    with open(EXEC_LOG_FILE, "a") as log_fh:
        log_fh.write("{}\n".format(msg))
def read_MOO_setting_yaml():
    """Load the MOO configuration dict from MOO_setting.yaml next to this script."""
    config_path = os.path.join(work_dir, "MOO_setting.yaml")
    with open(config_path) as config_file:
        return yaml.safe_load(config_file)
class FLEE_MOO_Problem(Problem):
def __init__(self, execution_mode, simulation_period, cores,
work_dir=work_dir):
# TODO: add input vraibles to MOO_setting.yaml file
super().__init__(n_var=1,
n_obj=5,
xl=np.array([0]), #
xu=np.array([19688])) #
self.work_dir = work_dir
self.cnt_SWEEP_dir = 0
self.execution_mode = execution_mode
self.simulation_period = simulation_period
self.cores = cores
def avg_distance(self, agents_out_files, camp_name):
df_array = [pd.read_csv(filename, index_col=None, header=0)
for filename in agents_out_files]
df = pd.concat(df_array, axis=0, ignore_index=True)
# filter rows for agent location == camp_name
df = df[(df["agent location"] == camp_name) &
(df["distance_moved_this_timestep"] > 0)
]
df.to_csv(os.path.join(
os.path.dirname(agents_out_files[0]), "df_agents.out.csv"),
sep=",",
mode="w",
index=False,
encoding='utf-8'
)
return df["distance_travelled"].mean()
def find_closest_location_to_camp(self, camp_lon, camp_lat):
# in kilometres
R = 6371
p = pi/180
dist = []
locations=[]
# Read lat(Latitude) and lon(Longitude) column in locations.csv file row by row.
locations_path = os.path.join(self.work_dir, "input_csv", "locations.csv")
with open(locations_path, newline='') as csvfile:
reader = csv.reader(csvfile)
next(reader)
# Iterate over each row after the header in the csv
for row in reader:
# row variable is a list that represents a row in csv
# print(row)
if row[2] == 'South_Sudan':
locations.append(row[0])
lat = float(row[3])
lon = float(row[4])
MOO_log(msg="\tlocation ={}".format(row[0]))
MOO_log(msg="\tlongitude ={}".format(lon))
MOO_log(msg="\tlatitude ={}".format(lat))
# calculate the haversine distance between Z and other locations in south sudan, respectively.
phi = (camp_lat-lat) * p
lam = (lon-camp_lon) * p
a = sin(phi/2)*sin(phi/2)+cos(lat*p)*cos(camp_lat*p)*sin(lam/2)*sin(lam/2);
c = 2*atan2(sqrt(a),sqrt(1-a))
dist.append(R * c)
MOO_log(msg="\tall locations ={}".format(locations))
MOO_log(msg="\tdistance between these locations and Z={}".format(dist))
# find the shortest path
min_dist = np.amin(dist)
index_min_dist = dist.index(min_dist)
nearest_loc = locations[index_min_dist]
return nearest_loc, min_dist
# --------------------------------------------------------------------------
    def change_route_to_camp(self, csv_name):
        """Rewire the route network so camp 'Z' connects to its nearest location.

        For each candidate camp row (lon, lat, ipc, accessibility) in
        input_csv/<csv_name>, finds the closest South_Sudan location, points
        the 'Z' route at it in routes.csv, and writes the updated routes plus
        a one-row campIPC.csv into a fresh SWEEP/<n>/input_csv directory.
        Increments self.cnt_SWEEP_dir once per processed row.
        """
        MOO_log(msg="\n[change_route_to_camp]")
        selectedCamps_csv_PATH = os.path.join(self.work_dir, "input_csv", csv_name)
        # Read the data in selectedCamps.csv file row by row.
        with open(selectedCamps_csv_PATH, newline='') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)  # skip the header row
            # Iterate over each row after the header in the csv
            for row in reader:
                # row variable is a list that represents a row in csv
                lon = float(row[0])
                lat = float(row[1])
                ipc = float(row[2])
                accessibility = float(row[3])
                MOO_log(msg="\tcamp lon ={}".format(lon))
                MOO_log(msg="\tcamp lat ={}".format(lat))
                # 1. Find the nearest location to camp and calculate the distance
                # between them.
                nearest_loc, min_dist = self.find_closest_location_to_camp(
                    camp_lon=float(lon), camp_lat=float(lat)
                )
                # 2. Read routes.csv and modify the data (i.e., the nearest
                # location to camp and the distance between them)
                routes_csv_PATH = os.path.join(self.work_dir, "input_csv", "routes.csv")
                df = pd.read_csv(routes_csv_PATH)
                # change one value of a row (callable indexers select the
                # 'Z' route row and the target column)
                df.loc[lambda df: df['name2'] == 'Z', lambda df:'#name1'] = nearest_loc
                df.loc[lambda df: df['name2'] == 'Z', lambda df:'distance'] = str(min_dist)
                MOO_log(msg="\tLatitude of camp Z: {} \n\t"
                            "Longitude of camp Z: {}\n\t"
                            "nearest location: {}\n\t"
                            "distance to {}:{}".format(
                                float(lon),
                                float(lat),
                                nearest_loc,
                                nearest_loc, min_dist)
                        )
                # 3. Write the updated route.csv in the moo_ssudan SWEEP
                # directory.
                sweep_dir = os.path.join(self.work_dir, "SWEEP")
                # curr_dir_count = len(os.listdir(sweep_dir))
                curr_dir_count = self.cnt_SWEEP_dir
                sub_dir_SWEEP = os.path.join(
                    sweep_dir, "{}".format(curr_dir_count + 1), "input_csv"
                )
                # refuse to overwrite a SWEEP dir from an earlier evaluation
                if os.path.exists(sub_dir_SWEEP):
                    raise RuntimeError(
                        "SWEEP dir {} is exists !!!!!".format(sub_dir_SWEEP)
                    )
                os.makedirs(sub_dir_SWEEP)
                MOO_log(msg="\tgenerates SWEEP : {}".format(sub_dir_SWEEP))
                updated_routes_csv_PATH = os.path.join(sub_dir_SWEEP, "routes.csv")
                df.to_csv(updated_routes_csv_PATH, index = False)
                # 4. Write campIPC.csv in the moo_ssudan SWEEP directory
                campIPC_PATH = os.path.join(sub_dir_SWEEP, "campIPC.csv")
                with open(campIPC_PATH, "w", newline="") as fout:
                    writer = csv.writer(fout, delimiter=",")
                    writer.writerow(["lon", "lat", "ipc", "accessibility"])
                    writer.writerow([lon, lat, ipc, accessibility])
                self.cnt_SWEEP_dir += 1
                MOO_log(msg="\t{}".format("-" * 30))
# --------------------------------------------------------------------------
    def flee_optmization(self, run_dir, camp_name):
        """Compute the five objective values for one finished simulation run.

        Reads out.csv and locations.csv under *run_dir* and returns
        [avg distance travelled (obj#1), camp population on the last day
        (obj#2), mean remaining camp capacity (obj#3), IPC phase (obj#4),
        accessibility score (obj#5)]. Deletes the bulky agents.out.* files
        as a side effect.
        """
        MOO_log(msg="\n[flee_optmization] called for "
                "run_dir = {} camp_name = {}".format(run_dir, camp_name)
        )
        # calculate camp population, obj#2
        df = pd.read_csv(os.path.join(run_dir, "out.csv"))
        sim_camp_population_last_day = df["{} sim".format(camp_name)].iloc[-1]
        sim_camp_population = df["{} sim".format(camp_name)].tolist()
        MOO_log(msg="\tsim camp {} population of the last day = {}".format(
            camp_name, sim_camp_population_last_day)
        )
        MOO_log(msg="\tsim camp {} population = {}".format(
            camp_name, sim_camp_population)
        )
        # find the agents.out files
        agents_out_files = glob.glob(
            "{}".format(os.path.join(run_dir, "agents.out.*"))
        )
        # obj#1
        avg_distance_travelled = self.avg_distance(
            agents_out_files=agents_out_files, camp_name=camp_name
        )
        MOO_log(
            msg="\tInput file : {}"
            "\n\t\tavg distance travelled for agents "
            "to camp name {} = {}".format(
                [os.path.basename(filename) for filename in agents_out_files],
                camp_name,
                avg_distance_travelled
            )
        )
        # clean agents.out files to reduce the disk space usage
        # NOTE(review): shell=True with an interpolated path -- paths are
        # generated internally, but a glob via os.remove would be safer.
        clean_agents_cmd = "rm {}".format(os.path.join(
            os.path.dirname(agents_out_files[0]), "agents.out.*"))
        subprocess.check_output(
            clean_agents_cmd,
            shell=True,
        )
        # calculate camp capacity
        PopulationScaledownFactor = 100
        df = pd.read_csv(os.path.join(run_dir, "input_csv", "locations.csv"))
        camp_population = df[df["#name"] == camp_name]["population"].values[0]
        camp_population = camp_population/PopulationScaledownFactor
        MOO_log(msg="\tmax camp {} population = {}".format(
            camp_name, camp_population)
        )
        # calculate average remain camp capacity over simulation days, obj#3
        remain_camp_capacity = mean(
            [abs(camp_population - i) for i in sim_camp_population]
        )
        MOO_log(msg="\tremain camp {} capacity = {}".format(
            camp_name, remain_camp_capacity)
        )
        # calculate IPC phase, obj#4
        input_dir_SWEEP = os.path.join(run_dir, "input_csv")
        ipc_df = pd.read_csv(os.path.join(input_dir_SWEEP, "campIPC.csv"))
        camp_ipc = float(ipc_df.loc[0,"ipc"])
        # calculate accessibility score, obj#5
        camp_accessibility = float(ipc_df.loc[0,"accessibility"])
        MOO_log(msg="\tcamp {}: IPC phase = {},\taccessibility score = {}".format(
            camp_name, camp_ipc, camp_accessibility)
        )
        # return values [obj#1, obj#2, obj#3, obj#4, obj#5]
        return [avg_distance_travelled, sim_camp_population_last_day,
                remain_camp_capacity, camp_ipc, camp_accessibility]
#------------------------------------start-----------------------------------
    def run_simulation_with_PJ(self, sh_jobs_scripts):
        """Run the given SWEEP shell scripts through the QCG PilotJob manager.

        Submits one bash job per script (self.cores cores each) to the
        module-level QCG_MANAGER and blocks until all of them finish.
        """
        from qcg.pilotjob.api.job import Jobs
        jobs = Jobs()
        for sh_job_scripts in sh_jobs_scripts:
            # SWEEP/<n>/run.sh -> job name SWEEP_<n>
            sweep_dir_name = os.path.basename(os.path.dirname(sh_job_scripts))
            jobs.add(
                name="SWEEP_{}".format(sweep_dir_name),
                exec="bash",
                args=["-l", sh_job_scripts],
                stdout="{}/{}.stdout".format(
                    os.path.dirname(sh_job_scripts),
                    "${jname}__${uniq}"
                ),
                stderr="{}/{}.stderr".format(
                    os.path.dirname(sh_job_scripts),
                    "${jname}__${uniq}"
                ),
                numCores={"exact": self.cores},
                model="default"
            )
            print("\nAdd job with :")
            print("name=SWEEP_{}".format(sweep_dir_name))
            print("args = [-l,{}]".format(sh_job_scripts))
            print("stdout = {}/{}.stdout".format(
                os.path.dirname(sh_job_scripts),
                "${jname}__${uniq}")
            )
            print("stderr = {}/{}.stderr".format(
                os.path.dirname(sh_job_scripts),
                "${jname}__${uniq}")
            )
            print("numCores=exact: {}".format(self.cores))
        ids = QCG_MANAGER.submit(jobs)
        # wait until submited jobs finish
        QCG_MANAGER.wait4(ids)
        print("\nAll new SWEEP dirs are finished...\n")
def run_simulation_without_PJ(self, sh_jobs_scripts):
"""
running simulation from SWEEP dir without using PJ
"""
for sh_job_scripts in sh_jobs_scripts:
# subprocess.check_output(sh_job_scripts, shell=True)
try:
p = subprocess.Popen(sh_job_scripts, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
except Exception as e:
raise RuntimeError("Unexpected error: {}".format(e))
sys.exit()
acceptable_err_subprocesse_ret_codes = [0]
if p.returncode not in acceptable_err_subprocesse_ret_codes:
raise RuntimeError(
"\njob execution encountered an error (return code {})"
"while executing '{}'".format(p.returncode, command)
)
sys.exit(0)
#-------------------------------------end------------------------------------
def _evaluate(self, x, out, *args, **kwargs):
"""
1. The _evaluate method takes a one-dimensional NumPy array X with n rows as an input.
The row represents an individual, namely, the index of a possible camp location.
After doing the necessary calculations, the objective values must be
added to the dictionary, out, with the key F.
"""
# ---------------------------------start--------------------------------
# read accessible_camp_ipc.csv
df = pd.read_csv("accessible_camp_ipc.csv")
camp_coords_df = df[['lon', 'lat']]
coords = camp_coords_df.to_numpy()
# obtain coordinates of selected camps
X_1D = x.flatten()
X_1D = X_1D.astype('int64')
population = coords[X_1D, :]
pop_size = len(population)
MOO_log(
msg="\n{}\nExecuting _evaluate function with input "
"population : \n{}\n".format("-" * 30, pformat(population))
)
n = 1
for row in population:
MOO_log("\tpotential location {}: {}".format(n, row))
n += 1
# Get IPC phase data of each camp location
ipc = df.loc[X_1D, 'IPC']
ipc_list = ipc.tolist()
# Get accessibility score of each camp location
accessibility_score = df.loc[X_1D, 'landcover']
accessibility_list = accessibility_score.tolist()
selected_camps = [[*a, b, c] for a, b, c in zip(population, ipc_list, accessibility_list)]
selectedCamps_csv_PATH = os.path.join(
self.work_dir, "input_csv", "selectedCamps.csv"
)
# Save data to CSV
with open(selectedCamps_csv_PATH, "w", newline="") as file:
writer = csv.writer(file, delimiter=",")
writer.writerow(["Camp Longitude", "Camp Latitude", "IPC Score", "Accessibility Score"]) # header
writer.writerows(selected_camps)
# ------------------------------end-----------------------------------
# count the number of run folder in SWEEP dir
sweep_dir = os.path.join(self.work_dir, "SWEEP")
####################################################################
# Run change_route_to_camp function to update the routes.csv file #
# according to the parameter ind, which is the coordinate of camp. | |
# Source: ccj5351/DAFStereoNets -- src/baselines/PACNet/task_jointUpsampling/main.py
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import argparse
import os
import glob
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from . import models, datasets
def apply_model(net, lres, guide, factor):
    """Apply *net* to a low-res input and a guidance image.

    When the guide's spatial size equals the low-res size times *factor*,
    the network is applied directly. Otherwise the guide is bilinearly
    resampled to lres_size * factor first, and the network output is
    resampled back to the guide's size.
    """
    h_low, w_low = lres.shape[-2:]
    h_out, w_out = guide.shape[-2:]
    if (h_low * factor, w_low * factor) == (h_out, w_out):
        return net(lres, guide)
    guide_resized = F.interpolate(guide, size=(h_low * factor, w_low * factor),
                                  align_corners=False, mode='bilinear')
    prediction = net(lres, guide_resized)
    return F.interpolate(prediction, size=(h_out, w_out),
                         align_corners=False, mode='bilinear')
def train(model, train_loader, optimizer, device, epoch, lr, loss_type, perf_measures, args):
    """Train *model* for one epoch and return a list of logged metric rows.

    Each sample is (lres, guide, target[, raw_range]); when raw_range is
    present the target is normalized per-channel to [0, 1] for the l2 loss
    and predictions are denormalized before computing epe/rmse metrics.
    """
    model.train()
    log = []
    for batch_idx, sample in enumerate(train_loader):
        lres, guide, target = sample[0].to(device), sample[1].to(device), sample[2].to(device)
        if len(sample) >= 4:
            # per-channel (min, max) of the raw target -> offset and scale
            raw_range = sample[3].to(device)
            _ch = raw_range.shape[1]
            raw_min = raw_range[:, :, 0].view(-1, _ch, 1, 1)
            raw_scale = raw_range[:, :, 1].view(-1, _ch, 1, 1) - raw_min
        else:
            raw_min, raw_scale = 0.0, 1.0
        optimizer.zero_grad()
        output = apply_model(model, lres, guide, args.factor)
        # center-crop the output to the target's spatial size
        crop = tuple(((o - t) // 2, t) for o, t in zip(output.shape[-2:], target.shape[-2:]))
        output = output[:, :, crop[0][0]:crop[0][0]+crop[0][1], crop[1][0]:crop[1][0]+crop[1][1]]
        if loss_type == 'l2':
            loss = F.mse_loss(output, (target - raw_min) / raw_scale)
        elif loss_type == 'epe':
            loss = models.th_epe((output * raw_scale) + raw_min, target)
        elif loss_type == 'rmse':
            loss = models.th_rmse((output * raw_scale) + raw_min, target)
        else:
            raise ValueError('Loss type ({}) not supported.'.format(args.loss))
        loss.backward()
        optimizer.step()
        batch_cnt = batch_idx + 1
        sample_cnt = batch_idx * args.batch_size + len(lres)
        progress = sample_cnt / len(train_loader.dataset)  # fraction of epoch done
        # record metrics every log_interval batches (and on the last batch)
        if batch_cnt == len(train_loader) or batch_cnt % args.log_interval == 0:
            log_row = [progress + epoch - 1, lr, loss.item()]
            for m in perf_measures:
                if m == 'epe':
                    log_row.append(models.th_epe((output * raw_scale) + raw_min, target).item())
                elif m == 'rmse':
                    log_row.append(models.th_rmse((output * raw_scale) + raw_min, target).item())
            log.append(log_row)
        # print progress every print_interval batches (and on the last batch)
        if batch_cnt == len(train_loader) or batch_cnt % args.print_interval == 0:
            print('Train Epoch {} [{}/{} ({:3.0f}%)]\tLR: {:g}\tLoss: {:.6f}\t'.format(
                epoch, sample_cnt, len(train_loader.dataset), 100. * progress, lr, loss.item()))
    return log
def test(model, test_loader, device, epoch, lr, loss_type, perf_measures, args):
    """Evaluate *model* on *test_loader*; return one logged metric row.

    Mirrors train(): optional raw_range normalization, center-crop of the
    output to the target size, and the same loss/metric definitions, but
    accumulates dataset-wide averages under torch.no_grad().
    """
    model.eval()
    loss_accum = 0
    perf_measures_accum = [0.0] * len(perf_measures)
    with torch.no_grad():
        for sample in test_loader:
            lres, guide, target = sample[0].to(device), sample[1].to(device), sample[2].to(device)
            if len(sample) >= 4:
                # per-channel (min, max) of the raw target -> offset and scale
                raw_range = sample[3].to(device)
                _ch = raw_range.shape[1]
                raw_min = raw_range[:, :, 0].view(-1, _ch, 1, 1)
                raw_scale = raw_range[:, :, 1].view(-1, _ch, 1, 1) - raw_min
            else:
                raw_min, raw_scale = 0.0, 1.0
            output = apply_model(model, lres, guide, args.factor)
            # center-crop the output to the target's spatial size
            crop = tuple(((o - t) // 2, t) for o, t in zip(output.shape[-2:], target.shape[-2:]))
            output = output[:, :, crop[0][0]:crop[0][0]+crop[0][1], crop[1][0]:crop[1][0]+crop[1][1]]
            if loss_type == 'l2':
                loss = F.mse_loss(output, (target - raw_min) / raw_scale)
            elif loss_type == 'epe':
                loss = models.th_epe((output * raw_scale) + raw_min, target)
            elif loss_type == 'rmse':
                loss = models.th_rmse((output * raw_scale) + raw_min, target)
            else:
                raise ValueError('Loss type ({}) not supported.'.format(args.loss))
            # weight by batch size so the final division yields a true mean
            loss_accum += loss.item() * len(output)
            for i, m in enumerate(perf_measures):
                if m == 'epe':
                    perf_measures_accum[i] += models.th_epe((output * raw_scale) + raw_min, target).item() * len(output)
                elif m == 'rmse':
                    perf_measures_accum[i] += models.th_rmse((output * raw_scale) + raw_min, target).item() * len(output)
    test_loss = loss_accum / len(test_loader.dataset)
    log = [float(epoch), lr, test_loss]
    msg = 'Average loss: {:.6f}\n'.format(test_loss)
    for m, mv in zip(perf_measures, perf_measures_accum):
        avg = mv / len(test_loader.dataset)
        msg += '{}: {:.6f}\n'.format(m, avg)
        log.append(avg)
    print('\nTesting (#epochs={})'.format(epoch))
    print(msg)
    return [log]
def prepare_log(log_path, header, last_epoch=0):
    """Create or truncate a CSV log file for resumable training.

    Keeps all existing log lines whose epoch (first column) is <=
    *last_epoch*; if the file is missing, unreadable, or contains no such
    line, (re)writes it with just *header*.

    Fixes over the original: the bare ``except:`` is narrowed, and a
    single-row log no longer crashes (np.genfromtxt returns a 0-d array
    there, so ``len(log)`` raised TypeError).
    """
    try:
        log = np.genfromtxt(log_path, delimiter=',', skip_header=1, usecols=(0,))
    except (OSError, ValueError):
        # missing or malformed file -> start a fresh log
        log = []
    # normalize: genfromtxt yields a 0-d array for a single data row
    log = np.atleast_1d(log)
    if len(log) > 0:
        idxs = np.where(log <= last_epoch)[0]
        if len(idxs) > 0:
            # +1 for the header line, +1 to make the slice end inclusive
            lines_to_keep = max(idxs) + 2
            with open(log_path) as f:
                lines = f.readlines()
            with open(log_path, 'w') as f:
                f.writelines(lines[:lines_to_keep])
            return
    with open(log_path, 'w') as f:
        f.write(header + '\n')
def main():
parser = argparse.ArgumentParser(description='Joint upsampling',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--factor', type=int, default=8, metavar='R',
help='upsampling factor')
parser.add_argument('--data-root', type=str, default='data', metavar='D',
help='place to find (or download) data')
parser.add_argument('--exp-root', type=str, default='exp', metavar='E',
help='place to save results')
parser.add_argument('--download', default=False, action='store_true',
help='download dataset if not found locally')
parser.add_argument('--load-weights', type=str, default='', metavar='L',
help='file with pre-trained weights')
#NOTE:model selection
parser.add_argument('--model', type=str, default='PacJointUpsample', metavar='M',
help='network model type')
parser.add_argument('--dataset', type=str, default='NYUDepthV2', metavar='D',
help='dataset')
parser.add_argument('--lowres-mode', type=str, default='', metavar='LM',
help='overwrite how lowres samples are generated')
parser.add_argument('--zero-guidance', default=False, action='store_true',
help='use zeros for guidance')
parser.add_argument('--loss', type=str, default='l2', metavar='L',
help='choose a loss function type')
parser.add_argument('--measures', nargs='+', default=None, metavar='M',
help='performance measures to be reported during training and testing')
parser.add_argument('--num-data-worker', type=int, default=4, metavar='W',
help='number of subprocesses for data loading')
parser.add_argument('--val-ratio', type=float, default=0.0, metavar='V',
help='use this portion of training set for validation')
parser.add_argument('--train-split', type=str, default='', metavar='TRAIN',
help='specify a subset for training')
parser.add_argument('--test-split', type=str, default='', metavar='TEST',
help='specify a subset for testing')
parser.add_argument('--batch-size', type=int, default=1, metavar='N',
help='input batch size for training')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='N',
help='input batch size for testing')
parser.add_argument('--train-crop', type=int, default=256, metavar='CROP',
help='input crop size in training')
parser.add_argument('--eval-border', type=int, default=-1, metavar='EB',
help='specify a border that is excluded from evaluating loss and error')
parser.add_argument('--epochs', type=int, default=0, metavar='N',
help='number of epochs to train')
parser.add_argument('--optimizer', type=str, default='Adam', metavar='O',
help='pick which optimizer to use')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate')
parser.add_argument('--lr-steps', nargs='+', default=None, metavar='S',
help='decrease lr by 10 at these epochs')
parser.add_argument('--overwrite', default=False, action='store_true',
help='ignore existing log files and snapshots')
parser.add_argument('--weight-decay', type=float, default=0.0, metavar='WD',
help='Adam/SGD weight decay')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed')
parser.add_argument('--print-interval', type=int, default=100, metavar='N',
help='how many batches to wait before displaying training status')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--test-interval', type=int, default=10, metavar='N',
help='how many epochs to wait before a testing')
parser.add_argument('--snapshot-interval', type=int, default=100, metavar='N',
help='snapshot intermediate models')
args = parser.parse_args()
torch.manual_seed(args.seed)
# use_cuda = not args.no_cuda and torch.cuda.is_available()
assert(torch.cuda.is_available())
use_cuda = True
device = torch.device("cuda" if use_cuda else "cpu")
dl_kwargs = {'num_workers': args.num_data_worker, 'pin_memory': True} if use_cuda else {}
# find existing snapshots
os.makedirs(args.exp_root, exist_ok=True)
snapshots_found = sorted([int(s.split('_')[-1].rstrip('.pth'))
for s in glob.glob(os.path.join(args.exp_root, 'weights_epoch_*.pth'))])
load_weights = args.load_weights
if snapshots_found and not args.overwrite:
last_epoch = max(snapshots_found) if args.epochs > max(snapshots_found) else args.epochs
assert last_epoch in snapshots_found
assert not load_weights
load_weights = os.path.join(args.exp_root, 'weights_epoch_{}.pth'.format(last_epoch))
else:
last_epoch = 0
test_only = (args.epochs <= last_epoch)
# dataset
if args.dataset == 'NYUDepthV2':
ch, guide_ch = 1, 3
eval_border = 6 if args.eval_border < 0 else args.eval_border
perf_measures = ('rmse',) if not args.measures else args.measures
train_split = 'train' if not args.train_split else args.train_split
test_split = 'test' if not args.test_split else args.test_split
lowres_mode = 'center' if not args.lowres_mode else args.lowres_mode
train_transform = datasets.AssembleJointUpsamplingInputs(args.factor, flip=True, lowres_mode=lowres_mode,
zero_guidance=args.zero_guidance,
output_crop=eval_border, crop=(
None if args.train_crop <= 0 else args.train_crop))
test_transform = datasets.AssembleJointUpsamplingInputs(args.factor, flip=False, lowres_mode=lowres_mode,
zero_guidance=args.zero_guidance,
output_crop=eval_border)
if args.epochs > 0:
train_dset = datasets.NYUDepthV2(args.data_root, transform=train_transform, download=args.download,
split=train_split, val_ratio=args.val_ratio, cache_all=True)
else:
train_dset = None
test_dset = datasets.NYUDepthV2(args.data_root, transform=test_transform, download=args.download,
split=test_split, val_ratio=args.val_ratio, cache_all=True)
elif args.dataset in ('Sintel', 'Sintel-clean', 'Sintel-final', 'Sintel-albedo'):
render_pass = '<PASSWORD>' if args.dataset == 'Sintel' else args.dataset.split('-')[1]
ch, guide_ch = 1, 3
eval_border = 0 if args.eval_border < 0 else args.eval_border
perf_measures = ('epe',) if not args.measures else args.measures
train_split = 'train' if not args.train_split else args.train_split
test_split = 'val' if not args.test_split else args.test_split
lowres_mode = 'bilinear' if not args.lowres_mode else args.lowres_mode
train_transform = datasets.AssembleJointUpsamplingInputs(args.factor, flip=True, lowres_mode=lowres_mode,
zero_guidance=args.zero_guidance,
output_crop=eval_border, crop=(
None if args.train_crop <= 0 else args.train_crop))
test_transform = datasets.AssembleJointUpsamplingInputs(args.factor, flip=False, lowres_mode=lowres_mode,
zero_guidance=args.zero_guidance,
output_crop=eval_border)
if args.epochs > 0:
train_dset = datasets.Sintel(args.data_root, transform=train_transform, download=args.download,
cache_all=True, fields=(render_pass + '_1', 'flow'), split=train_split)
else:
train_dset = None
test_dset = datasets.Sintel(args.data_root, transform=test_transform, download=args.download, cache_all=True,
fields=(render_pass + '_1', 'flow'), split=test_split)
else:
raise ValueError('Dataset ({}) not supported.'.format(args.dataset))
# data loader
if test_only:
train_loader = None
else:
train_loader = torch.utils.data.DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, **dl_kwargs)
test_loader = torch.utils.data.DataLoader(test_dset, batch_size=args.test_batch_size, shuffle=True, **dl_kwargs)
# model
if args.model.startswith('JBU'):
ks, s_color, s_spatial = args.model.split('_')[1:]
model = models.JointBilateral(channels=ch, factor=args.factor, kernel_size=int(ks),
scale_color=float(s_color), scale_space=float(s_spatial))
else:
model = models.__dict__[args.model](channels=ch, guide_channels=guide_ch, factor=args.factor)
if load_weights:
model.load_state_dict(torch.load(load_weights))
print('\nModel weights initialized from: {}'.format(load_weights))
model = model.to(device)
# optimizer, scheduler, and logs
if not test_only:
if args.optimizer == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
else:
raise ValueError('Optimizer type ({}) is not supported.'.format(args.optimizer))
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
[] if not args.lr_steps else [int(v) for v in args.lr_steps],
gamma=0.1,
last_epoch=-1)
for s in range(last_epoch):
scheduler.step() # TODO: a temporary | |
<reponame>google-research/hyperbo
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convex gradient-based optimization algorithms.
This file contains simple implementations of some convex gradient based
optimization algorithms, specifically for use with non-jittable large scale
functions.
Author: <NAME>.
"""
from absl import logging
from flax.core import frozen_dict
import jax
import jax.numpy as jnp
@jax.jit
def _dict_tensordot(a, b, axes):
fn = lambda a, b: jnp.tensordot(a, b, axes)
return jax.tree_multimap(fn, a, b)
@jax.jit
def _dict_vdot(a, b):
return jax.tree_util.tree_reduce(jnp.add, jax.tree_multimap(jnp.vdot, a, b))
def _return_index(fn, index):
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
return result[index]
return wrapper
def backtracking_linesearch(val_and_grad_fn,
                            cur_val,
                            params,
                            grads,
                            direction,
                            alpha=1.,
                            c1=1e-4,
                            c2=0.9,
                            tau=0.5,
                            max_steps=50,
                            has_aux=False,
                            args=tuple()):
  """A simple two-directional backtracking line-search.

  Uses the Armijo–Goldstein and Wolfe conditions to determine the step size.
  These two are generally bundled into what's called the Wolfe conditions. They
  measure whether "sufficient progress" was made given the current step size.
  The Armijo-Goldstein in 0th order (is the value significantly better adjusted
  for the scale of the function) and the Wolfe in 1st order (e.g. is the
  gradient still steep). The second Wolfe condition requires a gradient eval
  and is generally required to guarantee convergence of a variety of approximate
  second order methods (such as LBFGS).

  This assumes one is minimizing the function fn.

  Args:
    val_and_grad_fn: The function that is being minimized, of the form fn(x) =
      y.
    cur_val: The current function value, i.e. conditioned on params.
    params: A dict of numpy arrays of parameters passed to fn.
    grads: Gradients of the function fn at position defined by params.
    direction: A dict with directions to take steps in for each of the values,
      corresponding to params.
    alpha: initial step size.
    c1: A scalar search control parameter determining the strength of
      convergence in (0, 1) for the Armijo condition.
    c2: A scalar search control parameter determining the strength of
      convergence in (0, 1) for the curvature condition.
    tau: A scalar search control parameter determining the strength of
      convergence in (0, 1).
    max_steps: Maximum number of times to evaluate fn and take linesearch steps.
    has_aux: Boolean indicating whether fn returns anything in addition to a
      scalar value, as in jax.value_and_grad.
    args: A tuple containing any additional positional arguments to fn, such
      that fn will be called as fn(params, *args)

  Returns:
    new_val: The resulting value achieved by following the linesearch.
    alpha: The determined step size.
  """
  # Directional derivative along the proposed direction; it must be negative
  # for `direction` to be a descent direction when minimizing.
  grads_dot_dir = _dict_vdot(grads, direction)
  if grads_dot_dir > 0.:
    logging.info("Incorrect descent direction %f. Exiting linesearch",
                 grads_dot_dir)
    # NOTE(review): this early exit returns `params` (a dict) while every
    # other path returns a scalar value as documented above -- confirm that
    # callers handle this case.
    return params, alpha
  # Armijo slope threshold: a step of size a is accepted only if the value
  # dropped by at least a * c1 * (g . d) (grads_dot_dir < 0 here).
  t = c1 * grads_dot_dir
  armijo_cond = lambda x, a: jnp.greater_equal(cur_val + a * t, x)
  def wolfe_curvature_cond(new_grads):
    # Curvature (second Wolfe) condition: the slope along `direction` at the
    # trial point has flattened enough relative to the starting slope.
    return jnp.greater_equal(
        _dict_vdot(new_grads, direction), c2 * grads_dot_dir)
  for i in range(max_steps):
    # Trial parameters for the current step size alpha.
    fn = lambda a, b: a + b * alpha
    new_params = jax.tree_multimap(fn, params, direction)
    new_val, new_grads = val_and_grad_fn(new_params, *args)
    if has_aux:
      # Keep only the scalar loss; drop the auxiliary output.
      new_val = new_val[0]
    logging.info(
        "Linesearch: step %i orig: %f new: %f step size: %f Armijo cond %d", i,
        cur_val, new_val, alpha, armijo_cond(new_val, alpha))
    if jnp.isfinite(new_val) and armijo_cond(new_val, alpha):
      if wolfe_curvature_cond(new_grads):
        logging.info("Satisfied linesearch Wolfe conditions: step %i %f", i,
                     new_val)
        return new_val, alpha
      else:
        # Armijo holds but curvature does not: the step is too timid, so
        # grow it (this is the "two-directional" part of the search).
        alpha *= 2.1
    else:
      # Insufficient decrease (or non-finite value): shrink the step.
      alpha *= tau
  # Ran out of linesearch steps: keep the last finite value if there is one.
  if (not jnp.isnan(new_val)) and jnp.isfinite(new_val):
    return new_val, alpha
  else:  # If we hit nans or infs return where we started.
    return cur_val, 0.
@jax.jit
def lbfgs_descent_dir_nocedal(grads, s, y):
  """Compute the descent direction for L-BFGS.

  This computes a very coarse but memory efficient estimate of the
  Hessian-gradient product to determine a linesearch direction.

  This follows the recursive algorithm specified in "Updating Quasi-Newton
  Matrices with Limited Storage", Nocedal '80, p 779. Note variable names
  mirror those from Nocedal.

  Args:
    grads: A dict where the values are arrays corresponding to the gradients of
      the function being optimized.
    s: A list of dicts of length M containing the difference in parameters
      (corresponding to grads) from the last M LBFGS updates.
    y: A list of dicts of length M containing the difference in gradients
      (corresponding to grads) from the last M LBFGS updates.

  Returns:
    direction: A dict corresponding to descent directions in similar form to
      grads.
  """
  # (Nocedal's convention: s_k = x_{k+1} - x_k, y_k = g_{k+1} - g_k. The
  # original docstring had the two descriptions swapped.)
  bound = len(s)
  q = jax.tree_util.tree_map(lambda x: -x, grads)
  # rho_i = 1 / (y_i . s_i), the curvature scalars from Nocedal's recursion.
  inv_p = [1. / _dict_vdot(y[i], s_i) for i, s_i in enumerate(s)]
  alphas = {}
  # First recursion: walk the history from newest to oldest.
  for i in range(bound - 1, -1, -1):
    alpha = inv_p[i] * _dict_vdot(s[i], q)
    alphas[i] = alpha
    # Bind alpha as a default argument to avoid the late-binding-closure trap.
    # jax.tree_multimap was deprecated/removed; tree_util.tree_map accepts
    # multiple trees and is the supported replacement.
    q = jax.tree_util.tree_map(
        lambda a, b, alpha=alpha: a - alpha * b, q, y[i])
  # gamma_k scales the initial inverse-Hessian estimate H_0 = gamma_k * I.
  gamma_k = _dict_vdot(s[-1], y[-1]) / _dict_vdot(y[-1], y[-1])
  direction = jax.tree_util.tree_map(lambda x: gamma_k * x, q)
  # Second recursion: walk the history from oldest to newest.
  for i in range(0, bound):
    beta = inv_p[i] * _dict_vdot(y[i], direction)
    step = (alphas[i] - beta)
    fn = lambda a, b, step=step: a + b * step
    direction = jax.tree_util.tree_map(fn, direction, s[i])
  return direction
def lbfgs(fn,
params,
memory=10,
ls_steps=50,
steps=100,
alpha=1.,
tol=1e-6,
ls_tau=0.5,
args=tuple(),
has_aux=False,
val_and_grad_fn=None,
state=None,
callback=None):
"""Optimize a function with the lbfgs algorithm.
This implementation allows for dictionaries of parameters and the
possibility that the function fn can not be jitted (e.g. contains a pmap).
Thus it makes use of native python loops but can be jitted externally
to make the optimization loops faster.
Args:
fn: The function to be minimized, called with a single argument params.
params: A dict of parameters to be passed to the function fn. The values
must be dict or numpy arrays.
memory: The number of steps of history to store for the algorithm. This
governs the accuracy of the underlying Hessian approximation while trading
off the memory usage of the algorithm.
ls_steps: Number of linesearch steps to do at each LBFGS iteration.
steps: The total number of optimization steps to perform.
alpha: Initial step size for the linesearch.
tol: Convergence tolerance.
ls_tau: Scalar to multiply the step size by for each linesearch increment,
in (0, 1)
args: A tuple containing additional positional arguments to pass to fn, as
in result = fn(params, *args)
has_aux: Boolean indicating whether fn returns anything in addition to a
scalar value, as in jax.value_and_grad.
val_and_grad_fn: A function that returns the value and gradient of fn, as
provided by jax.value_and_grad.
state: A list or tuple containing internal state of the optimizer, to be
passed in if this is called multiple times in a row to maintain the
Hessian estimate.
callback: an optional callback function.
Returns:
params: A new set of parameters corresponding to the result of the
optimization.
state: A tuple containing the state of the optimizer, i.e. this is to be
passed back in to the function to reconstruct the Hessian estimate if this
is called repeatedly.
"""
if val_and_grad_fn is None:
val_and_grad_fn = jax.value_and_grad(fn, has_aux=has_aux)
if isinstance(params, frozen_dict.FrozenDict):
# Flax generates parameters as FrozenDict, whose copy() function
# takes a dict argument as input.
copy_fn = lambda x: x.copy({})
else:
copy_fn = lambda x: x.copy()
if state is None:
s_k = []
y_k = []
val, grads = val_and_grad_fn(params, *args)
grad_norm = _dict_vdot(grads, grads)
if grad_norm <= tol:
logging.info("LBFGS converged at start.")
return val, params, None
if has_aux: # Grab just the loss if fn returns multiple things.
val, aux = val
descent_dir = jax.tree_map(lambda x: -x, grads)
old_params = copy_fn(params)
old_grads = copy_fn(grads)
init_alpha = 1. / jnp.sqrt(grad_norm)
new_val, step_size = backtracking_linesearch(
val_and_grad_fn,
val,
params,
grads,
descent_dir,
init_alpha,
tau=ls_tau,
args=args,
has_aux=has_aux,
max_steps=ls_steps)
if new_val < val:
params = jax.tree_multimap(lambda a, b: a + b * step_size, params,
descent_dir)
else:
logging.info("Linesearch did not make progress.")
new_val = (new_val, aux) if has_aux else new_val
return new_val, params, (s_k, y_k, old_grads, old_params)
else:
s_k, y_k, old_grads, old_params = state
for i in range(steps):
val, grads = val_and_grad_fn(params, *args)
if | |
pa.append("G04 Greetings! *\n")
pa.append("G04 This Gerber was generated by PCBmodE, an open source *\n")
pa.append("G04 PCB design software. Get it here: *\n")
pa.append("G04 *\n")
pa.append("G04 http://pcbmode.com *\n")
pa.append("G04 *\n")
pa.append("G04 Also visit *\n")
pa.append("G04 *\n")
pa.append("G04 http://boldport.com *\n")
pa.append("G04 *\n")
pa.append("G04 and follow @boldport / @pcbmode for updates! *\n")
pa.append("G04 *\n")
pa.append("\n")
# version %s on %s GMT; *\n" % (config.cfg['version'], datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")))
# Define figure format
pa += self._getParamCommand("FSLAX%d%dY%d%d" % (self._digits,
self._decimals,
self._digits,
self._decimals),
"leading zeros omitted (L); absolute data (A); %s integer digits and %s fractional digits" % (self._digits, self._decimals))
pa.append("\n")
# Define units
pa += self._getParamCommand("MOMM", "mode (MO): millimeters (MM)")
pa.append("\n")
pa.append("G04 Aperture definitions *\n")
# Fixed circular aperture used for closed shapes
pa.append("%%ADD%dC,%.3fX*%%\n" % (self._closed_shape_aperture_num,
0.001))
pa.append("%%ADD%dC,%.3fX*%%\n" % (self._pad_flashes_aperture_num,
0.001))
# List all apertures captured for this sheet
for aperture in self._apertures:
pa.append("%%ADD%dC,%.2fX*%%\n" % (self._apertures[aperture],
float(aperture)))
pa.append("\n")
return pa
    def _getGerberGrammar(self):
        """
        Returns the grammar of Gerber

        Builds a pyparsing parser (OneOrMore of grouped alternatives) that
        recognises the subset of RS-274X used here: comments, units,
        coordinate format, aperture definitions/changes, polarity changes,
        draw/move/flash operations, region (G36/G37) shapes and end-of-file.
        """

        # Reference table of the Gerber commands this parser understands.
        # NOTE(review): this dictionary is never referenced by the grammar
        # below; it appears to serve only as documentation -- confirm.
        gerber_dictionary = {
            "G04": { "text": "comment" },
            "G36": { "text": "closed-shape-start" },
            "G37": { "text": "closed-shape-end" },
            "MO": { "text": "units",
                    "MM": { "text": "mm" },
                    "IN": { "text": "inch" }
            },
            "AD": { "text": "aperture-definition",
                    "C": { "text": "circle" },
                    "R": { "text": "rectangle" }
            },
            "FS": { "text": "format" ,
                    "L": { "text": "leading-zeros" },
                    "A": { "text": "absolute" }
            },
            "D01": { "text": "draw"},
            "D02": { "text": "move"},
            "D03": { "text": "flash"}
        }

        # Define grammar using pyparsing
        space = pyp.Literal(' ')
        comma = pyp.Literal(',').suppress()

        # Capture a float string and cast to float
        floatnum = pyp.Regex(r'([\d\.]+)').setParseAction(lambda t: float(t[0]))

        # Capture integer string and cast to int
        integer = pyp.Regex(r'(-?\d+)').setParseAction(lambda t: int(t[0]))

        # Capture single digit string and cast to int
        single_digit = pyp.Regex(r'(\d)').setParseAction(lambda t: int(t[0]))

        # Single-letter tokens renamed to human-readable parse results.
        aperture = pyp.Literal('D').setParseAction(pyp.replaceWith('aperture'))
        coord_x = pyp.Literal('X').setParseAction(pyp.replaceWith('x'))
        coord_y = pyp.Literal('Y').setParseAction(pyp.replaceWith('y'))

        # A raw coordinate value in Gerber fixed-point notation (sign+digits).
        gcoord = pyp.Regex(r'(-?\d+)')
        coord_dict = pyp.dictOf((coord_x | coord_y), gcoord)
        coord_xy = pyp.Group(coord_dict + coord_dict)

        inst_del = pyp.Literal('%').suppress() # instruction delimeter
        inst_end = pyp.Literal('*').suppress() # ending suffix

        cmd_comment = pyp.Literal('G04').setParseAction(pyp.replaceWith('comment'))
        cmd_closed_shape_start = pyp.Literal('G36')
        cmd_closed_shape_end = pyp.Literal('G37')

        # Units (MO) command and its MM / IN options.
        cmd_units = pyp.Literal('MO')('gerber-command')
        cmd_units_opt_mm = pyp.Literal('MM').setParseAction(pyp.replaceWith('mm'))
        cmd_units_opt_inch = pyp.Literal('IN').setParseAction(pyp.replaceWith('inch'))

        # Format specification (FS) command and its option letters.
        cmd_format = pyp.Literal('FS')('gerber-command')
        cmd_format_opt_leading_zeros = pyp.Literal('L').setParseAction(pyp.replaceWith('leading'))
        cmd_format_opt_trailing_zeros = pyp.Literal('T').setParseAction(pyp.replaceWith('trailing'))
        cmd_format_opt_absolute = pyp.Literal('A').setParseAction(pyp.replaceWith('absolute'))
        cmd_format_opt_incremental = pyp.Literal('I').setParseAction(pyp.replaceWith('incremental'))

        # Aperture definition
        cmd_ap_def = pyp.Literal('AD')('gerber-command')
        cmd_ap_def_num = 'D' + integer.setResultsName('number')
        cmd_ap_def_opt_circ = pyp.Literal('C').setParseAction(pyp.replaceWith('circle'))
        cmd_ap_def_opt_rect = pyp.Literal('R').setParseAction(pyp.replaceWith('rect'))

        # Layer polarity (LP): dark adds material, clear removes it.
        cmd_polarity = pyp.Literal('LP')('gerber-command')
        cmd_polarity_opt_dark = pyp.Literal('D').setParseAction(pyp.replaceWith('dark'))
        cmd_polarity_opt_clear = pyp.Literal('C').setParseAction(pyp.replaceWith('clear'))

        # Interpolation modes; suppressed because they carry no payload here.
        cmd_linear_int = pyp.Literal('G01').suppress() # lineal interpolation
        cmd_circ_int_cw = pyp.Literal('G02').suppress() # circular int. clockwise
        cmd_circ_int_ccw = pyp.Literal('G03').suppress() # circular int. counter-clockwise

        # Aperture payload: either "C,<diameter>X" or "R,<width>X<height>".
        aperture_type = (((cmd_ap_def_opt_circ('type') + comma) + (floatnum)('diameter') + 'X') |
                         ((cmd_ap_def_opt_rect('type') + comma) + (floatnum)('width') + 'X' + (floatnum)('height')))

        polarity_type = (cmd_polarity_opt_clear | cmd_polarity_opt_dark)('polarity')

        units_type = (cmd_units_opt_mm | cmd_units_opt_inch)('units')

        format_zeros = ((cmd_format_opt_leading_zeros('zeros')) |
                        (cmd_format_opt_trailing_zeros('zeros')))
        format_notation = ((cmd_format_opt_absolute('notation')) |
                           (cmd_format_opt_incremental('notation')))
        # Number of integer and fractional digits of coordinate values.
        format_data = (single_digit)('integer') + single_digit('decimal')

        # comments (suppress)
        comment = (cmd_comment +
                   pyp.Optional(space) +
                   pyp.Regex(r"([^\*]+)?") +
                   pyp.Optional(space) +
                   inst_end).suppress()

        # Extended commands are wrapped in '%' delimiters: %MO...*%, %FS...*%
        units = (inst_del +
                 pyp.Group(cmd_units +
                           units_type)('units') +
                 inst_end +
                 inst_del)

        gformat = (inst_del +
                   pyp.Group(cmd_format +
                             format_zeros +
                             format_notation +
                             'X' + pyp.Group(format_data)('x') +
                             'Y' + pyp.Group(format_data)('y'))('format') +
                   inst_end +
                   inst_del)

        ap_def = (inst_del +
                  pyp.Group(cmd_ap_def +
                            cmd_ap_def_num +
                            aperture_type)('aperture_definition') +
                  inst_end +
                  inst_del)

        polarity = (inst_del +
                    pyp.Group(cmd_polarity +
                              polarity_type)('polarity_change') +
                    inst_end +
                    inst_del)

        closed_shape_start = (cmd_closed_shape_start('start_closed_shape') + inst_end)
        closed_shape_end = (cmd_closed_shape_end('end_closed_shape') + inst_end)

        # Drawing operations: D01 draw (exposure on), D02 move, D03 flash.
        draw = pyp.Group(pyp.Optional(cmd_linear_int) +
                         'X' + (integer)('x') +
                         'Y' + (integer)('y') +
                         pyp.Literal('D01').suppress() +
                         inst_end)('draw')
        move = pyp.Group(pyp.Optional(cmd_linear_int) +
                         'X' + (integer)('x') +
                         'Y' + (integer)('y') +
                         pyp.Literal('D02').suppress() +
                         inst_end)('move')
        flash = pyp.Group(pyp.Optional(cmd_linear_int) +
                          'X' + (integer)('x') +
                          'Y' + (integer)('y') +
                          pyp.Literal('D03').suppress() +
                          inst_end)('flash')

        # Aperture selection, e.g. "D12*".
        aperture_change = (pyp.Literal('D').suppress() +
                           pyp.Group(integer('number') + inst_end)('aperture_change'))

        # end of file (suppress)
        the_end = (pyp.Literal('M02') + inst_end)('end_of_gerber')

        # Any single Gerber statement is one of the alternatives below.
        grammar = (comment |
                   units |
                   gformat |
                   ap_def |
                   aperture_change |
                   draw | move | flash |
                   polarity |
                   closed_shape_start |
                   closed_shape_end |
                   the_end)

        return pyp.OneOrMore(pyp.Group(grammar))
def gerbers_to_svg(manufacturer='default'):
    """
    Takes Gerber files as input and generates an SVG of them

    Args:
        manufacturer: name of the manufacturer profile; currently unused in
            this function but kept for interface compatibility.
    """

    def normalise_gerber_number(gerber_number, axis, form):
        """
        Takes a Gerber number and converts it into a float using
        the formatting defined in the Gerber header
        """
        # TODO: actually support anything other than leading zeros
        number = gerber_number / pow(10.0, form[axis]['decimal'])
        return number

    def parsed_grammar_to_dict(parsed_grammar):
        """
        Converts the pyparsing results into a dictionary describing the
        Gerber: format, units, aperture definitions and drawing features.
        """
        gerber_dict = {}

        for line in parsed_grammar:
            if not line.dump():
                continue
            if line.format:
                fmt = gerber_dict.setdefault('format', {})
                fmt['notation'] = line['format']['notation']
                fmt['zeros'] = line['format']['zeros']
                fmt['x'] = {'integer': line['format']['x']['integer'],
                            'decimal': line['format']['x']['decimal']}
                # BUGFIX: the y-axis figures were previously copied from the
                # x-axis parse results; use the parsed y-axis values.
                fmt['y'] = {'integer': line['format']['y']['integer'],
                            'decimal': line['format']['y']['decimal']}
            elif line.units:
                gerber_dict['units'] = line['units']['units']
            elif line.aperture_definition:
                tmp = {}
                if line['aperture_definition']['type'] == 'circle':
                    tmp['type'] = 'circle'
                    tmp['diameter'] = line['aperture_definition']['diameter']
                    tmp['number'] = line['aperture_definition']['number']
                elif line['aperture_definition']['type'] == 'rect':
                    tmp['type'] = 'rect'
                    tmp['width'] = line['aperture_definition']['width']
                    tmp['height'] = line['aperture_definition']['height']
                    tmp['number'] = line['aperture_definition']['number']
                else:
                    print("ERROR: cannot recognise aperture definition type")
                gerber_dict.setdefault('aperture-definitions', []).append(tmp)
            elif line.polarity_change:
                # A polarity change opens a new group of shapes ("feature").
                gerber_dict.setdefault('features', []).append(
                    {'polarity': line['polarity_change']['polarity'],
                     'shapes': []})
            elif line.aperture_change:
                shapes = gerber_dict['features'][-1]['shapes']
                shapes.append({'type': 'aperture-change',
                               'number': line.aperture_change['number']})
                # An aperture change implicitly starts a new stroke.
                shapes.append({'type': 'stroke', 'segments': []})
            elif line.start_closed_shape:
                # G36 opens a filled region; segments are collected until G37.
                gerber_dict['features'][-1]['shapes'].append(
                    {'type': 'fill', 'segments': []})
            elif line.move or line.draw or line.flash:
                if line.move:
                    command_name = 'move'
                    item = line.move
                if line.draw:
                    command_name = 'draw'
                    item = line.draw
                if line.flash:
                    command_name = 'flash'
                    item = line.flash
                # Convert raw fixed-point coordinates using the FS format.
                point = Point(
                    normalise_gerber_number(item['x'], 'x', gerber_dict['format']),
                    normalise_gerber_number(item['y'], 'y', gerber_dict['format']))
                gerber_dict['features'][-1]['shapes'][-1]['segments'].append(
                    {'type': command_name, 'coord': point})
            # G37 (end of closed shape) needs no bookkeeping here; the next
            # aperture change or G36 starts a fresh shape.

        return gerber_dict

    def create_gerber_svg_data(gerber_data):
        """
        Returns an SVG element of the input Gerber data
        """
        gerber_data_parsed = gerber_grammar.parseString(gerber_data)
        gerber_data_dict = parsed_grammar_to_dict(gerber_data_parsed)
        return svg.generate_svg_from_gerber_dict(gerber_data_dict)

    # get the board's shape / outline
    board_shape_gerber_lp = None
    shape = config.brd['board_outline']['shape']
    board_shape_type = shape.get('type')

    if board_shape_type in ['rect', 'rectangle']:
        # NOTE(review): `offset` is computed but never used below -- confirm
        # whether the offset should be applied to the generated path.
        offset = utils.to_Point(shape.get('offset') or [0, 0])
        board_shape_path = svg.rect_to_path(shape)
    elif board_shape_type == 'path':
        board_shape_path = shape.get('value')
        board_shape_gerber_lp = shape.get('gerber_lp')
        if board_shape_path is None:
            print("ERROR: couldn't find a path under key 'value' for board outline")
    else:
        print("ERROR: unrecognised board shape type: %s. Possible options are 'rect' or 'path'" % board_shape_type)

    # convert path to relative
    board_shape_path_relative = svg.absolute_to_relative_path(board_shape_path)

    # this will return a path having an origin at the center of the shape
    # defined by the path
    board_width, board_height, board_outline = svg.transform_path(board_shape_path_relative, True)

    display_width = board_width
    display_height = board_height

    sig_dig = config.cfg['significant-digits']

    # extra buffer for display frame
    display_frame_buffer = config.cfg.get('display-frame-buffer') or 1.0

    gerber = et.Element('svg',
        width=str(display_width) + config.brd['config']['units'],
        height=str(display_height) + config.brd['config']['units'],
        viewBox=str(-display_frame_buffer/2) + ' ' + str(-display_frame_buffer/2) + ' ' + str(board_width+display_frame_buffer) + ' ' + str(board_height + display_frame_buffer),
        version='1.1',
        nsmap=cfg['namespace'],
        fill='black')
    doc = et.ElementTree(gerber)

    gerber_layers = svg.create_layers_for_gerber_svg(gerber)

    # directory for where to expect the Gerbers within the build path
    # regardless of the source of the Gerbers, the PCBmodE directory
    # structure is assumed
    production_path = os.path.join(config.cfg['base-dir'],
                                   config.cfg['locations']['build'],
                                   'production')

    # get board information from configuration file
    pcbmode_version = config.cfg['version']
    board_name = config.cfg['name']
    board_revision = config.brd['config'].get('rev')
    base_name = "%s_rev_%s" % (board_name, board_revision)

    gerber_grammar = gerber_grammar_generator()

    # Only the outline foil is processed for now; 'documentation' was a
    # planned addition.
    for foil in ['outline']:
        gerber_file = os.path.join(production_path, base_name + '_%s.ger' % (foil))
        # BUGFIX: close the file deterministically instead of relying on
        # garbage collection of the anonymous handle.
        with open(gerber_file, 'r') as f:
            gerber_data = f.read()
        gerber_svg = create_gerber_svg_data(gerber_data)
        gerber_svg_layer = gerber_layers[foil]['layer']
        gerber_svg_layer.append(gerber_svg)
        print(foil)
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OpenIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
    def patch_conversations_messaging_integrations_twitter_integration_id(self, integration_id, body, **kwargs):
        """
        Update Twitter messaging integration

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.patch_conversations_messaging_integrations_twitter_integration_id(integration_id, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str integration_id: Integration ID (required)
        :param TwitterIntegrationRequest body: TwitterIntegrationRequest (required)
        :return: TwitterIntegration
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['integration_id', 'body']
        all_params.append('callback')

        # Snapshot of the local namespace; used below to validate kwargs and
        # to look up arguments by name. NOTE: renaming any local above would
        # change this dict's keys, so the locals() trick makes refactoring
        # fragile -- this is generated code and relies on it.
        params = locals()
        # Reject any keyword argument this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_conversations_messaging_integrations_twitter_integration_id" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'integration_id' is set
        if ('integration_id' not in params) or (params['integration_id'] is None):
            raise ValueError("Missing the required parameter `integration_id` when calling `patch_conversations_messaging_integrations_twitter_integration_id`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_conversations_messaging_integrations_twitter_integration_id`")

        # Build the request path; the integration id is a path parameter.
        resource_path = '/api/v2/conversations/messaging/integrations/twitter/{integrationId}'.replace('{format}', 'json')
        path_params = {}
        if 'integration_id' in params:
            path_params['integrationId'] = params['integration_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request payload is the TwitterIntegrationRequest body.
        body_params = None
        if 'body' in params:
            body_params = params['body']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        # Dispatch the PATCH request. Returns the deserialized
        # TwitterIntegration, or the request thread when a `callback`
        # kwarg was supplied (asynchronous mode).
        response = self.api_client.call_api(resource_path, 'PATCH',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='TwitterIntegration',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
def patch_conversations_messaging_integrations_whatsapp_integration_id(self, integration_id, body, **kwargs):
    """
    Update or activate a WhatsApp messaging integration.

    Activation is a two-step flow: first send action=Activate with an
    authenticationMethod of Sms or Voice to receive an activation code,
    then send action=Confirm with the confirmationCode received.

    Synchronous by default; pass a ``callback`` keyword to run the request
    asynchronously, in which case the request thread is returned.

    :param str integration_id: Integration ID (required)
    :param WhatsAppIntegrationUpdateRequest body: WhatsAppIntegrationUpdateRequest (required)
    :param callback function: optional callback for an asynchronous request
    :return: WhatsAppIntegration
    """
    # Reject any keyword argument that is not part of this method's contract.
    for key in kwargs:
        if key not in ('integration_id', 'body', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_conversations_messaging_integrations_whatsapp_integration_id" % key
            )
    # Both parameters are mandatory; None is treated as missing.
    if integration_id is None:
        raise ValueError("Missing the required parameter `integration_id` when calling `patch_conversations_messaging_integrations_whatsapp_integration_id`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `patch_conversations_messaging_integrations_whatsapp_integration_id`")

    resource_path = '/api/v2/conversations/messaging/integrations/whatsapp/{integrationId}'.replace('{format}', 'json')
    path_params = {'integrationId': integration_id}

    header_params = {}
    # Only send Accept when the client selected a non-empty media type.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    {},
                                    header_params,
                                    body=body,
                                    post_params=[],
                                    files={},
                                    response_type='WhatsAppIntegration',
                                    auth_settings=['PureCloud OAuth'],
                                    callback=kwargs.get('callback'))
def post_analytics_conversation_details_properties(self, conversation_id, body, **kwargs):
    """
    Index conversation properties

    POSTs to /api/v2/analytics/conversations/{conversationId}/details/properties.
    Synchronous by default; pass a ``callback`` keyword to run the request
    asynchronously, in which case the request thread is returned.

    :param str conversation_id: conversationId (required)
    :param PropertyIndexRequest body: request (required)
    :param callback function: optional callback for an asynchronous request
    :return: PropertyIndexRequest
    """
    # Reject any keyword argument that is not part of this method's contract.
    for key in kwargs:
        if key not in ('conversation_id', 'body', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_analytics_conversation_details_properties" % key
            )
    # Both parameters are mandatory; None is treated as missing.
    if conversation_id is None:
        raise ValueError("Missing the required parameter `conversation_id` when calling `post_analytics_conversation_details_properties`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `post_analytics_conversation_details_properties`")

    resource_path = '/api/v2/analytics/conversations/{conversationId}/details/properties'.replace('{format}', 'json')
    path_params = {'conversationId': conversation_id}

    header_params = {}
    # Only send Accept when the client selected a non-empty media type.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    {},
                                    header_params,
                                    body=body,
                                    post_params=[],
                                    files={},
                                    response_type='PropertyIndexRequest',
                                    auth_settings=['PureCloud OAuth'],
                                    callback=kwargs.get('callback'))
def post_analytics_conversations_aggregates_query(self, body, **kwargs):
    """
    Query for conversation aggregates

    POSTs to /api/v2/analytics/conversations/aggregates/query.
    Synchronous by default; pass a ``callback`` keyword to run the request
    asynchronously, in which case the request thread is returned.

    :param ConversationAggregationQuery body: query (required)
    :param callback function: optional callback for an asynchronous request
    :return: ConversationAggregateQueryResponse
    """
    # Reject any keyword argument that is not part of this method's contract.
    for key in kwargs:
        if key not in ('body', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_analytics_conversations_aggregates_query" % key
            )
    # The query body is mandatory; None is treated as missing.
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `post_analytics_conversations_aggregates_query`")

    resource_path = '/api/v2/analytics/conversations/aggregates/query'.replace('{format}', 'json')

    header_params = {}
    # Only send Accept when the client selected a non-empty media type.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(resource_path, 'POST',
                                    {},
                                    {},
                                    header_params,
                                    body=body,
                                    post_params=[],
                                    files={},
                                    response_type='ConversationAggregateQueryResponse',
                                    auth_settings=['PureCloud OAuth'],
                                    callback=kwargs.get('callback'))
def post_analytics_conversations_details_jobs(self, body, **kwargs):
"""
Query for conversation details asynchronously
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_analytics_conversations_details_jobs(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param AsyncConversationQuery body: query (required)
:return: AsyncQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_analytics_conversations_details_jobs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_analytics_conversations_details_jobs`")
resource_path = '/api/v2/analytics/conversations/details/jobs'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
| |
import sys
import time
import yaml
import math
import signal
import datetime
import threading
import traceback
import numpy as np
from cvxopt import matrix, solvers
#from scipy.spatial import ConvexHull
import matplotlib.patches as ptc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# from actions import *
# Per-agent draw colors; agent "uavN" indexes COLORS[N] (see Agent.__init__).
COLORS = [(0.0, 0.0, 0.0), (0.99, 0.0, 0.0), (0.0, 0.99, 0.0), (0.0, 0.0, 0.99), (0.99, 0.99, 0.0), (0.99, 0.0, 0.99), (0.0, 0.99, 0.99)]
# Module-level shared state, populated by Simulator.loadParams().
global_boundary = []  # corner vertices of the bounding polygon
xlim = []  # world x extent [min, max]
ylim = []  # world y extent [min, max]
test_type = 0  # selects the control law in Agent.solveStep (0: QP, 1: centroid)
world = None  # occupancy grid placeholder — not used in the visible code
def is_in_space(p, tol):
    """Return True if point *p* lies inside the rectangular world, padded by *tol*."""
    global xlim, ylim
    inside_x = xlim[0] - tol <= p[0] <= xlim[1] + tol
    inside_y = ylim[0] - tol <= p[1] <= ylim[1] + tol
    return inside_x and inside_y
def is_in_bounding_polygon(p, tol):
    """Unimplemented stub: intended point-in-polygon test against the global
    bounding polygon. Currently always returns None (falsy) — TODO implement
    or remove; no caller is visible in this file."""
    global global_boundary
    pass
def angle_in_2pi(v):
    """Polar angle of 2-D vector *v* using the atan2 convention, in (-pi, pi].

    NOTE: despite the name, the result is NOT shifted into [0, 2*pi);
    the normalization below is intentionally disabled.
    """
    #if angle <= 0:
    #    angle += 2 * np.pi
    return np.arctan2(v[1], v[0])
def to_grid(x, y, x_off, y_off):
    """Translate world coordinates (x, y) into grid coordinates by removing the offsets."""
    return x - x_off, y - y_off
#def get_convex_hull(V):
# hull = ConvexHull(V)
# return [V[vertex] for vertex in hull.vertices]
def appendGlobalBoundaries(B):
    """Append the world's rectangular boundary as (normal, point) half-plane
    constraints to *B*, using the bottom-left and top-right corners of the
    global bounding polygon (axis-aligned normals e_x and e_y for each)."""
    bottom_left = globals()['global_boundary'][0]
    top_right = globals()['global_boundary'][3]
    for corner in (bottom_left, top_right):
        for axis_normal in ([1., 0.], [0., 1.]):
            B.append((np.array(axis_normal, dtype=float), np.array(corner, dtype=float)))
def angularSort(reference, vertices):
    """Return *vertices* ordered by polar angle around *reference*.

    Ties are broken by original index, matching a plain sort on
    (angle, index) pairs.
    """
    keyed = []
    for idx, vertex in enumerate(vertices):
        keyed.append((angle_in_2pi(vertex - reference), idx))
    keyed.sort()
    return [vertices[idx] for _, idx in keyed]
class StateBuffer:
    """Shared store mapping agent name -> most recently advertised state dict.

    Relies on the GIL for atomicity of single dict operations; values are
    stored by reference, but getAllStates returns a shallow copy of the map.
    """

    def __init__(self):
        # name -> latest state advertised by that agent
        self.buffers = dict()

    def getState(self, name):
        """Return the last state advertised by *name* (KeyError if unknown)."""
        return self.buffers[name]

    def getAllStates(self):
        """Return a shallow copy of the whole name -> state map."""
        return dict(self.buffers)

    def updateState(self, name, s):
        """Record *s* as the newest state for *name*."""
        self.buffers[name] = s
class Agent:
    """A simulated UAV running its own control loop in a background thread.

    Each cycle the agent publishes its state to the global StateBuffer
    (``buf``), snapshots its neighbours' states, intersects perpendicular
    bisector half-planes to build its Voronoi cell, and then steers either
    by a goal-seeking QP over the cell (test_type 0) or toward the cell
    centroid, Lloyd-style (test_type 1).
    """

    def __init__(self, name, init, goal, vmax):
        # *name* is expected to look like "uavN"; N picks the draw color below.
        self.name = name
        self.move_thread = threading.Thread(name="{}_move".format(self.name), target=self.move)
        self.sim_log = open('LOG_{}.txt'.format(self.name), 'w+')
        self.terminate = False
        # Radii in world units: physical body, safety buffer, comm range.
        self.phys_radius = 2.0
        self.safe_radius = 3.0
        self.comm_radius = 10.0
        self.dt = 0.1  # control period in seconds
        self.vmax = vmax
        self.vmin = 0.5
        self.velocity = np.zeros(2)
        self.position = np.array(init, dtype=float)
        self.voronoi_graph = []  # vertices of the current Voronoi cell
        #self.color = tuple(np.random.rand(3))
        self.color = globals()['COLORS'][int(self.name[3:])]
        self.inter_sort_type = [('angle', float), ('vector', np.ndarray)]
        self.world = None
        self.world_offset = (globals()['xlim'][0], globals()['ylim'][0])
        self.frontier = set()
        self._B = np.array([[1., 0.], [0., 1.], [1., 0.], [0., 1.]], dtype=float)
        self.neighbours = dict()  # name -> latest copy of neighbour state
        # self.path = []
        # self.curves = []
        self.xhistory = []  # visited coordinates, kept for trail plotting
        self.yhistory = []
        self.goal = np.array(goal, dtype=float)
        self.goal_change = 10.  # distance the goal moved on the last update
        self.converged = False
        # Constant QP Hessian (2*I): minimizing ||x - goal||^2.
        self.H = matrix([[2., 0.], [0., 2.]], tc='d')
        # STATE:
        self.state = {'pos': self.position, 'vel': self.velocity, 'end': False}
        self.advertiseState()

    def initialize_world(self):
        # Unfinished occupancy-grid setup; kept for reference only.
        #global xlim, ylim
        #W = xlim[1] - xlim[0]
        #H = ylim[1] - ylim[0]
        #self.world = np.zeros((H, W))
        #grid_node = to_grid(self.position[0], self.position[1], xlim[1], ylim[1])
        #v_act = valid_actions(self.world, grid_node)
        #for act in v_act:
        #    applied_coord = apply_action_to_node(grid_node, act)
        #    pass
        pass

    def initialize(self):
        """Start the agent's control-loop thread."""
        #print("Initializing agent {}".format(self.name))
        #print("Agent {} --> {}".format(self.name, self.goal))
        self.move_thread.start()

    def setGoal(self, g):
        # Track how far the goal moved; a tiny move marks convergence.
        self.goal_change = np.linalg.norm(g - self.goal)
        self.converged = self.goal_change <= 0.1
        self.goal = np.array(g, dtype=float)

    def hasReachedGoal(self):
        """True once the agent sits on a goal that has stopped moving."""
        return np.linalg.norm(self.goal - self.state['pos']) <= 0.1 and self.converged

    def getCentroid(self):
        """Centroid of the (closed) Voronoi-cell polygon.

        NOTE(review): divides by the signed area — a degenerate (zero-area)
        cell divides by zero; confirm callers guarantee >= 3 vertices.
        """
        ### SOURCE: https://en.wikipedia.org/wiki/Centroid
        # Calculate area with Shoelace Formula
        area = 0
        for i in range(len(self.voronoi_graph) - 1):
            x_i, y_i = self.voronoi_graph[i]
            x_j, y_j = self.voronoi_graph[i + 1]
            area += x_i * y_j - x_j * y_i
        area *= 0.5
        # Calculate centroid of voronoi cell
        Cx, Cy = 0, 0
        for i in range(len(self.voronoi_graph) - 1):
            x_i, y_i = self.voronoi_graph[i]
            x_j, y_j = self.voronoi_graph[i + 1]
            product = (x_i * y_j - x_j * y_i)
            Cx += (x_i + x_j) * product
            Cy += (y_i + y_j) * product
        return np.array([Cx, Cy], dtype=float) / (6. * area)

    def computeBisectors(self):
        """Build half-plane constraints from neighbour bisectors plus the
        world bounds, rebuild the cell's vertex list from their pairwise
        intersections, and return (A_iq, b_iq) cvxopt matrices for the QP."""
        bisectors = []  # (normal, point)
        cons, vals = [], []
        tol = 0.1
        for a, st in self.neighbours.items():
            if st is None:
                continue
            if np.any(np.isnan(st['pos'])):
                print(f'Agent {self.name} neighbour {a} has NaN!')
            # Perpendicular bisector between us and the neighbour:
            # normal points toward the neighbour, m is the midpoint.
            normal = (st['pos'] - self.state['pos']).round(4)
            m = ((st['pos'] + self.state['pos']) * 0.5).round(4)
            bisectors.append((normal, m))
            cons.append(normal)
            #vals.append(m.dot(normal) - self.safe_radius)
            vals.append((m.dot(normal)).round(4))
        # bottom_left = globals()['global_boundary'][0]
        # top_right = globals()['global_boundary'][3]
        # bisectors.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))
        # bisectors.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))
        # bisectors.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))
        # bisectors.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))
        appendGlobalBoundaries(bisectors)
        A = np.array(cons, dtype=float)
        b = np.array(vals, dtype=float)
        self.voronoi_graph = []
        # Candidate cell vertices: intersection of every bisector pair that
        # lies inside the world and satisfies all neighbour constraints.
        for i in range(len(bisectors)):
            n_i, m_i = bisectors[i]
            d_i = m_i.dot(n_i)
            for j in range(i + 1, len(bisectors)):
                n_j, m_j = bisectors[j]
                d_j = m_j.dot(n_j)
                try:
                    A_ = np.array([n_i.round(4), n_j.round(4)], dtype=float)
                    b_ = np.array([d_i.round(4), d_j.round(4)], dtype=float)
                    p = (np.linalg.solve(A_, b_)).round(4)
                except np.linalg.LinAlgError:
                    # Parallel bisectors never intersect; skip the pair.
                    continue
                except:
                    print(traceback.format_exc())
                    continue
                if is_in_space(p, tol) and np.all(A.dot(p) <= b + 0.1):
                    self.voronoi_graph.append(p)
        A_iq = matrix(np.array(cons), tc='d')
        b_iq = matrix(np.array(vals), tc='d')
        # Order vertices around the agent so they form a polygon.
        self.voronoi_graph = angularSort(self.position, self.voronoi_graph)
        #self.voronoi_graph = get_convex_hull(self.voronoi_graph)
        return A_iq, b_iq

    def solveStep(self, A_iq, b_iq, _t=0):
        """Pick the next velocity command.

        _t == 0: QP toward the goal constrained to the buffered Voronoi cell.
        _t == 1: move toward the cell centroid (Lloyd iteration).
        Falls through to a zero velocity when no update is possible.
        """
        v_next = self.state['vel']
        if _t == 0:
            ## Buffered Voronoi Cell
            if A_iq and b_iq:
                solvers.options['show_progress'] = False
                sol = solvers.qp(self.H, matrix(-2. * self.goal, tc='d'), A_iq, b_iq)
                #print("Agent {} SOLN: {}".format(self.name, sol['x']))
                # NOTE(review): sol['x'][0] takes only the first coordinate of
                # the optimizer and broadcasts against the 2-D position —
                # looks suspicious; confirm the extraction is intended.
                v_next = (np.array(sol['x'][0]) - self.state['pos']) / self.dt
                _norm = np.linalg.norm(v_next)
                if _norm > self.vmax:
                    # Saturate at the speed limit, keeping direction.
                    v_next = self.vmax * v_next / _norm
                return v_next
        elif _t == 1:
            ## <NAME>
            if len(self.voronoi_graph):
                # Close the polygon before integrating over its edges.
                self.voronoi_graph.append(self.voronoi_graph[0])
                self.setGoal(self.getCentroid())
                v_next = self.goal - self.state['pos']
                _norm = np.linalg.norm(v_next)
                if _norm > self.vmax:
                    v_next *= self.vmax / np.linalg.norm(v_next)
                return v_next
        print(f'Agent {self.name} stopped momentarily.')
        return np.zeros(2)

    def doStep(self, v_next):
        # Forward-Euler position update; record the trail for plotting.
        x_, y_ = self.state['pos'][0], self.state['pos'][1]
        self.xhistory.append(x_)
        self.yhistory.append(y_)
        self.state['pos'] = self.state['pos'] + self.dt * v_next
        self.state['vel'] = v_next

    def stepLog(self, _t=0):
        """Append one line of per-step telemetry to this agent's log file."""
        if _t == 0:
            self.sim_log.write('{} - pos: {} - vel: {} - at: {}\n'.format(self.name, self.position, self.velocity, datetime.datetime.now()))
        elif _t == 1:
            # Agent name; current position; next goal
            #self.sim_log.write('{};{};{}\n'.format(self.name, self.position, self.goal))
            #self.sim_log.write(f'{self.name};{self.voronoi_graph.dfs_traversal()}\n')
            #self.sim_log.write(f'{self.name};{self.voronoi_graph}\n')
            pass

    def updateNeighbours(self):
        # Snapshot every other agent's advertised state (shallow copy).
        for uav, st in globals()['buf'].buffers.items():
            if uav == self.name or st is None:
                continue
            self.neighbours[uav] = dict(st)

    def advertiseState(self):
        # Publish our mutable state dict to the shared buffer
        # (``buf`` is expected at module level — defined outside this view).
        globals()['buf'].updateState(self.name, self.state)

    def stop(self):
        """Ask the control loop to exit after the current cycle."""
        self.terminate = True

    def move(self):
        """Thread body: the sense-plan-act loop with a soft period of dt."""
        test = globals()['test_type']
        pre_flight_count = 20  # idle cycles so all agents advertise first
        #while not self.terminate and not self.hasReachedGoal():
        while not self.terminate:
            _start = time.time()
            self.advertiseState()
            self.updateNeighbours()
            if pre_flight_count < 1:
                A, b = self.computeBisectors()
                v_next = self.solveStep(A, b, test)
                self.doStep(v_next)
                self.stepLog(test)
            else:
                pre_flight_count -= 1
            # Sleep out the remainder of the period; log nothing on overrun.
            _elapsed = time.time() - _start
            fail_hard = _elapsed >= self.dt
            if fail_hard:
                #print('Agent {} failed hard real-time constraint at {}'.format(self.name, datetime.datetime.now()))
                pass
            else:
                time.sleep(self.dt - _elapsed)
        self.state['end'] = True
        if self.hasReachedGoal():
            print("Agent {} has reached goal at {}".format(self.name, datetime.datetime.now()))
        self.sim_log.close()
class Simulator:
def __init__(self, pfile):
    """Load the YAML scenario *pfile*, create agents, and set up plotting.

    Defaults below are overwritten by loadParams().
    """
    self.xlim = [-20, 80]
    self.ylim = [-20, 80]
    self.count = 0
    self.agents = dict()  # name -> Agent, filled by loadParams()
    self.vmax = 0
    self.iteration = 0
    self.loadParams(pfile)
    #self.logfile = open('SimulatorLog.txt', 'w+')
    self.terminate = False
    # Background watchdog thread (see checkCollision); started elsewhere.
    self.distance_thread = threading.Thread(name='distance_thread', target=self.checkCollision)
    self.fig = plt.figure()
    self.ax = self.fig.add_subplot(1, 1, 1)
    #self.fig, self.axs = plt.subplots(2)
    self.ani = None  # animation handle, assigned when the run starts
def loadParams(self, pfile):
    """Read the YAML scenario file and populate both instance attributes
    and the module-level globals the agents read (world extents, boundary,
    test type). Also instantiates one Agent per 'uav' entry."""
    params = None
    with open(pfile) as P:
        params = yaml.load(P, Loader=yaml.FullLoader)
    self.xlim = np.array(params['xlim'], dtype=float)
    self.ylim = np.array(params['ylim'], dtype=float)
    self.count = params['count']
    self.vmax = params['vmax']
    # Publish shared configuration for the agent threads.
    globals()['test_type'] = params['test_type']
    globals()['world'] = np.zeros((int(self.ylim[1] - self.ylim[0]), int(self.xlim[1] - self.xlim[0])), dtype=int)
    globals()['xlim'] = np.array(self.xlim, dtype=float)
    globals()['ylim'] = np.array(self.ylim, dtype=float)
    #globals()['global_boundary'] = np.array([[i, j] for i in self.xlim for j in self.ylim], dtype=float)
    globals()['global_boundary'] = np.array([vertex for vertex in params['bounding_polygon']], dtype=float)
    #sorted_boundary = angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary'])
    # Patch used to draw the boundary; vertices sorted CCW around the mean.
    self.bounding_poly_plt = ptc.Polygon(angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary']),
                                         color=(0, 0, 0), fill=False)
    # Each uav entry is (name, initial position, goal).
    for entry in params['uav']:
        self.agents[entry[0]] = Agent(entry[0], entry[1], entry[2], self.vmax)
def isDone(self):
    """True once every agent's move loop has flagged completion (vacuously
    True when there are no agents)."""
    return all(agent.state['end'] for agent in self.agents.values())
def checkCollision(self):
    """Background watchdog: once per second, print a warning for every
    agent pair closer than 2.0 world units. Runs until self.terminate."""
    if not self.agents:
        return
    try:
        while not self.terminate:
            # Gather current positions into X/Y vectors.
            ax, ay = list(zip(*[tuple(a.state['pos']) for _, a in self.agents.items()]))
            X = np.array(ax, dtype=float)
            Y = np.array(ay, dtype=float)
            # Full pairwise Euclidean distance matrix via meshgrids.
            XX1, XX2 = np.meshgrid(X, X)
            YY1, YY2 = np.meshgrid(Y, Y)
            pairwise_dists = np.sqrt((XX2 - XX1) ** 2 + (YY2 - YY1) ** 2)
            R, C = pairwise_dists.shape
            for i in range(R):
                for j in range(C):
                    # j < i: only report each unordered pair once.
                    if j < i and pairwise_dists[i, j] <= 2.0:
                        print('COLLISION between agents uav{} and uav{} at {}'.format(i, j, datetime.datetime.now()))
            time.sleep(1)
    except Exception:
        print(traceback.format_exc())
def animate_motion(self, i):
    """Matplotlib animation callback: redraw agents, trails, the world
    boundary, and each agent's Voronoi cell from scratch every frame."""
    self.ax.clear()
    self.ax.set_xlim(self.xlim[0] - 5, self.xlim[1] + 5)
    self.ax.set_ylim(self.ylim[0] - 5, self.ylim[1] + 5)
    self.iteration += 1
    for _, a in self.agents.items():
        pos = a.state['pos']
        vel = a.state['vel']
        # Heading arrow derived from the current velocity.
        angle = np.arctan2(vel[1], vel[0])
        circle = plt.Circle(tuple(pos), 2., color=a.color)
        self.ax.quiver(pos[0], pos[1], np.cos(angle), np.sin(angle), color=a.color)
        self.ax.add_artist(circle)
        self.ax.plot(a.xhistory, a.yhistory, color=a.color)
        self.ax.add_patch(self.bounding_poly_plt)
        polygon = a.voronoi_graph
        # Need at least a triangle to draw a filled cell.
        if len(polygon) < 3:
            continue
        poly = plt.Polygon(polygon, alpha=0.4, color=a.color)
        self.ax.add_patch(poly)
def stop(self):
    """Signal the simulator loops (e.g. checkCollision) to exit."""
    self.terminate = True
def run(self):
print("Run starts at {}".format(datetime.datetime.now()))
for _, a in | |
<filename>rlpy/domains/domain.py
"""Domain base class"""
from abc import ABC, abstractmethod
from copy import deepcopy
import logging
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
__license__ = "BSD 3-Clause"
class Domain(ABC):
"""
The Domain controls the environment in which the
:py:class:`~rlpy.agents.agent.Agent` resides as well as the reward function the
Agent is subject to.
The Agent interacts with the Domain in discrete timesteps called
*episodes* (see :py:meth:`~rlpy.domains.domain.Domain.step`).
At each step, the Agent informs the Domain what indexed action it wants to
perform. The Domain then calculates the effects this action has on the
environment and updates its internal state accordingly.
It also returns the new state to the agent, along with a reward/penalty,
and whether or not the episode is over (thus resetting the agent to its
initial state).
This process repeats until the Domain determines that the Agent has either
completed its goal or failed.
The :py:class:`~rlpy.experiments.experiment.Experiment` controls this cycle.
Because agents are designed to be agnostic to the Domain that they are
acting within and the problem they are trying to solve, the Domain needs
to completely describe everything related to the task. Therefore, the
Domain must not only define the observations that the Agent receives,
but also the states it can be in, the actions that it can perform, and the
relationships between the three.
The Domain class is a base clase that provides the basic framework for all
domains. It provides the methods and attributes that allow child classes
to interact with the Agent and Experiment classes within the RLPy library.
domains should also provide methods that provide visualization of the
Domain itself and of the Agent's learning
(:py:meth:`~rlpy.domains.domain.Domain.show_domain` and
:py:meth:`~rlpy.domains.domain.Domain.show_learning` respectively) \n
All new domain implementations should inherit from :py:class:`~rlpy.domains.domain.domain`.
.. note::
Though the state *s* can take on almost any value, if a dimension is not
marked as 'continuous' then it is assumed to be integer.
"""
def __init__(
    self,
    num_actions,
    statespace_limits,
    discount_factor=0.9,
    continuous_dims=None,
    episode_cap=None,
):
    """
    :param num_actions: The number of Actions the agent can perform
    :param statespace_limits: Limits of each dimension of the state space.
        Each row corresponds to one dimension and has two elements [min, max]
    :param discount_factor: The discount factor by which rewards are reduced
    :param continuous_dims: List of the continuous dimensions of the domain
    :param episode_cap: The cap used to bound each episode (return to state 0 after)

    The number of state-space dimensions is derived from
    ``statespace_limits`` and stored in ``self.state_space_dims``.
    """
    self.num_actions = num_actions
    # Keep the caller's original limits untouched; self.statespace_limits
    # is extended in place for discrete dims by _extendDiscreteDimensions().
    self.raw_statespace_limits = statespace_limits.copy()
    self.statespace_limits = statespace_limits
    self.discount_factor = float(discount_factor)
    if continuous_dims is None:
        # Fully discrete: state count is the product of per-dimension sizes.
        self.num_states = int(
            np.prod(self.statespace_limits[:, 1] - self.statespace_limits[:, 0] + 1)
        )
        self.continuous_dims = []
    else:
        self.num_states = np.inf
        self.continuous_dims = continuous_dims
    self.episode_cap = episode_cap
    self.random_state = np.random.RandomState()
    self.state_space_dims = self.statespace_limits.shape[0]
    # For discrete domains, limits should be extended by half on each side so that
    # the mapping becomes identical with continuous states.
    # The original limits will be saved in self.discrete_statespace_limits.
    self._extendDiscreteDimensions()
    self.logger = logging.getLogger("rlpy.domains." + self.__class__.__name__)
    self.seed = None
    self.performance = False
def set_seed(self, seed):
    """
    Set random seed
    """
    # Remember the seed so experiments can report/replay it later.
    self.seed = seed
    self.random_state.seed(seed)
def __str__(self):
    """Human-readable summary of the domain's key quantities."""
    res = """{self.__class__}:
------------
Dimensions: {self.state_space_dims}
|S|: {self.num_states}
|A|: {self.num_actions}
Episode Cap:{self.episode_cap}
Gamma: {self.discount_factor}
""".format(
        self=self
    )
    return res
def show(self, a=None, representation=None):
    """
    Shows a visualization of the current state of the domain and that of
    learning.
    See :py:meth:`~rlpy.domains.domain.Domain.show_domain()` and
    :py:meth:`~rlpy.domains.domain.Domain.show_learning()`,
    both called by this method.
    .. note::
        Some domains override this function to allow an optional *s*
        parameter to be passed, which overrides the *self.state* internal
        to the domain; however, not all have this capability.
    :param a: The action being performed
    :param representation: The learned value function
    :py:class:`~rlpy.Representation.Representation.Representation`.
    """
    # Save/restore the RNG around rendering so any randomness used while
    # drawing does not perturb the experiment's random stream.
    self.saveRandomState()
    self.show_domain(a=a)
    self.show_learning(representation=representation)
    self.loadRandomState()
def show_domain(self, a=0):
    """
    *Abstract Method:*\n
    Shows a visualization of the current state of the domain.
    :param a: The action being performed.
    """
    # Optional hook: default is a no-op; visual domains override it.
    pass
def show_learning(self, representation):
    """
    *Abstract Method:*\n
    Shows a visualization of the current learning,
    usually in the form of a gridded value function and policy.
    It is thus really only possible for 1 or 2-state domains.
    :param representation: the learned value function
    :py:class:`~rlpy.Representation.Representation.Representation`
    to generate the value function / policy plots.
    """
    # Optional hook: default is a no-op; visual domains override it.
    pass
def close_visualizations(self):
    """Close matplotlib windows."""
    # No-op here; domains that open figures override this to clean up.
    pass
@abstractmethod
def s0(self):
    """
    Begins a new episode and returns the initial observed state of the Domain.
    Sets self.state accordingly.
    :return: A numpy array that defines the initial domain state.
    """
    pass
def possible_actions(self, s=None):
    """
    The default version returns an enumeration of all actions [0, 1, 2...].
    We suggest overriding this method in your domain, especially if not all
    actions are available from all states.
    :param s: The state to query for possible actions
    (overrides self.state if ``s != None``)
    :return: A numpy array containing every possible action in the domain.
    .. note::
        *These actions must be integers*; internally they may be handled
        using other datatypes. See :py:meth:`~rlpy.tools.general_tools.vec2id`
        and :py:meth:`~rlpy.tools.general_tools.id2vec` for converting between
        integers and multidimensional quantities.
    """
    # Base implementation ignores *s*: every action is always available.
    return np.arange(self.num_actions)
# TODO: change 'a' to be 'aID' to make it clearer when we refer to
# actions vs. integer IDs of actions? They aren't always interchangeable.
@abstractmethod
def step(self, a):
    """
    *Abstract Method:*\n
    Performs the action *a* and updates the Domain
    state accordingly.
    Returns the reward/penalty the agent obtains for
    the state/action pair determined by *Domain.state* and the parameter
    *a*, the next state into which the agent has transitioned, and a
    boolean determining whether a goal or fail state has been reached.
    .. note::
        domains often specify stochastic internal state transitions, such
        that the result of a (state,action) pair might vary on different
        calls (see also the :py:meth:`~rlpy.domains.domain.Domain.sample_step`
        method).
        Be sure to look at unique noise parameters of each domain if you
        require deterministic transitions.
    :param a: The action to perform.
    .. warning::
        The action *a* **must** be an integer >= 0, and might better be
        called the "actionID". See the class description
        :py:class:`~rlpy.domains.domain.Domain` above.
    :return: The tuple (r, ns, t, p_actions) =
    (Reward [value], next observed state, is_terminal [boolean])

    NOTE(review): the return description names a 4-tuple
    ``(r, ns, t, p_actions)`` but explains only three elements; confirm
    against concrete subclasses which shape is actually produced.
    """
    pass
def saveRandomState(self):
    """
    Stores the state of the random generator.
    Using loadRandomState this state can be loaded.
    """
    self.random_state_backup = self.random_state.get_state()
def loadRandomState(self):
    """
    Loads the random state stored in the self.random_state_backup
    """
    # Assumes saveRandomState() was called first; raises AttributeError otherwise.
    self.random_state.set_state(self.random_state_backup)
def is_terminal(self):
    """
    Returns ``True`` if the current Domain.state is a terminal one, ie,
    one that ends the episode. This often results from either a failure
    or goal state being achieved.\n
    The default definition does not terminate.
    :return: ``True`` if the state is a terminal state, ``False`` otherwise.
    """
    return False
def _extendDiscreteDimensions(self):
    """
    Widen every discrete dimension's limits by 0.5 on each side so that
    binning treats discrete and continuous states identically.
    .. warning::
        Internal to the Domain base class — **call only once**.
    """
    # Keep the unextended limits for discrete-space calculations.
    self.discrete_statespace_limits = self.statespace_limits
    self.statespace_limits = self.statespace_limits.astype("float")
    for dim in range(self.state_space_dims):
        if dim in self.continuous_dims:
            continue
        self.statespace_limits[dim, 0] -= 0.5
        self.statespace_limits[dim, 1] += 0.5
@property
def statespace_width(self):
    # Per-dimension width (max - min) of the (extended) state space.
    return self.statespace_limits[:, 1] - self.statespace_limits[:, 0]
@property
def discrete_statespace_width(self):
    # Per-dimension width using the original (unextended) discrete limits.
    return (
        self.discrete_statespace_limits[:, 1]
        - self.discrete_statespace_limits[:, 0]
    )
def all_states(self):
    """Returns an iterator of all states"""
    # Optional API: only domains with an enumerable state space override this.
    raise NotImplementedError(f"All states is not implemented for {type(self)}")
def sample_step(self, a, num_samples):
    """
    Sample a set number of next states and rewards from the domain.
    This function is used when state transitions are stochastic;
    deterministic transitions will yield an identical result regardless
    of *num_samples*, since repeatedly sampling a (state,action) pair
    will always yield the same tuple (r, ns, terminal).
    See :py:meth:`~rlpy.domains.domain.Domain.step`.
    :param a: The action to attempt
    :param num_samples: The number of next states and rewards to be sampled.
    :return: A tuple of numpy arrays ``(S, R)`` where *S* holds the sampled
        next states and *R* the corresponding rewards.
    """
    saved_state = self.state.copy()
    samples = []
    for _ in range(num_samples):
        r, ns, terminal = self.step(a)
        # Rewind so every sample starts from the same state.
        self.state = saved_state.copy()
        samples.append((ns, r))
    next_states = np.array([ns for ns, _ in samples])
    rewards = np.array([r for _, r in samples])
    return next_states, rewards
def __copy__(self):
cls = self.__class__
result = | |
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: <EMAIL>
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import int
__all__ = ["mktime_tz", "parsedate", "parsedate_tz", "quote"]
import time, calendar
# Joiner constants kept for rfc822 compatibility.
SPACE = " "
EMPTYSTRING = ""
COMMASPACE = ", "
# Parse a date field
# Month names: 3-letter abbreviations first, full names after, so
# index(name) % 12 + 1 maps either form to the month number (see the
# `mm > 12` adjustment in _parsedate_tz). "may" appears twice because its
# abbreviation equals its full name.
_monthnames = [
    "jan",
    "feb",
    "mar",
    "apr",
    "may",
    "jun",
    "jul",
    "aug",
    "sep",
    "oct",
    "nov",
    "dec",
    "january",
    "february",
    "march",
    "april",
    "may",
    "june",
    "july",
    "august",
    "september",
    "october",
    "november",
    "december",
]
_daynames = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
# Offsets are in the +-HHMM "hundreds" convention, not minutes.
_timezones = {
    "UT": 0,
    "UTC": 0,
    "GMT": 0,
    "Z": 0,
    "AST": -400,
    "ADT": -300,  # Atlantic (used in Canada)
    "EST": -500,
    "EDT": -400,  # Eastern
    "CST": -600,
    "CDT": -500,  # Central
    "MST": -700,
    "MDT": -600,  # Mountain
    "PST": -800,
    "PDT": -700,  # Pacific
}
def parsedate_tz(data):
    """Convert a date string to a 10-item time tuple.

    Accounts for military timezones.  Returns None if the string cannot
    be parsed.
    """
    fields = _parsedate_tz(data)
    if not fields:
        return None
    if fields[9] is None:
        # A "-0000"-style zone parses as None; normalize it to UTC here.
        fields[9] = 0
    return tuple(fields)
def _parsedate_tz(data):
    """Convert date to extended time tuple.
    The last (additional) element is the time zone offset in seconds, except if
    the timezone was specified as -0000. In that case the last element is
    None. This indicates a UTC timestamp that explicitly declaims knowledge of
    the source timezone, as opposed to a +0000 timestamp that indicates the
    source timezone really was UTC.

    Returns a 10-element list, or None if the string cannot be parsed.
    """
    if not data:
        return
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(",") or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # Comma fused onto the first token: keep only what follows it.
        i = data[0].rfind(",")
        if i >= 0:
            data[0] = data[0][i + 1 :]
    if len(data) == 3:  # RFC 850 date, deprecated
        stuff = data[0].split("-")
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find("+")
        if i == -1:
            i = s.find("-")
        if i > 0:
            # Split a fused time+zone field ("10:00:00+0100") into two.
            data[3:] = [s[:i], s[i:]]
        else:
            data.append("")  # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Day and month may have been swapped ("Mar 28" vs "28 Mar").
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Full month names occupy indices 12-23; map back to 1-12.
        mm -= 12
    if dd[-1] == ",":
        dd = dd[:-1]
    i = yy.find(":")
    if i > 0:
        # Year and time fields were swapped.
        yy, tm = tm, yy
    if yy[-1] == ",":
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and timezone fields were swapped.
        yy, tz = tz, yy
    if tm[-1] == ",":
        tm = tm[:-1]
    tm = tm.split(":")
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = "0"
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    elif len(tm) == 1 and "." in tm[0]:
        # Some non-compliant MUAs use '.' to separate time elements.
        # NOTE(review): if this split yields a length other than 2 or 3,
        # thh/tmm/tss stay unbound and the int() below raises NameError.
        tm = tm[0].split(".")
        if len(tm) == 2:
            [thh, tmm] = tm
            tss = 0
        elif len(tm) == 3:
            [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
        # A "-0000" zone explicitly declaims timezone knowledge (docstring).
        if tzoffset == 0 and tz.startswith("-"):
            tzoffset = None
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ((tzoffset // 100) * 3600 + (tzoffset % 100) * 60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
def parsedate(data):
    """Convert a time string to a 9-item time tuple (zone info dropped)."""
    parsed = parsedate_tz(data)
    if not isinstance(parsed, tuple):
        # Unparsable input propagates as-is (None).
        return parsed
    return parsed[:9]
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.

    data[9] is the zone offset in seconds (or None when the zone is
    unknown).
    """
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    utc_seconds = calendar.timegm(data)
    return utc_seconds - data[9]
def quote(str):
    """Prepare string to be used in a quoted string.

    Turns backslash and double quote characters into quoted pairs. These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    # Escape backslashes first so the quote escaping is not double-escaped.
    escaped = str.replace("\\", "\\\\")
    return escaped.replace('"', '\\"')
class AddrlistClass(object):
"""Address parser class by <NAME>.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use email.utils.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;."[]'
self.pos = 0
self.LWS = " \t"
self.CR = "\r\n"
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace(".", "")
self.field = field
self.commentlist = []
def gotonext(self):
"""Skip white space and extract comments."""
wslist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + "\n\r":
if self.field[self.pos] not in "\n\r":
wslist.append(self.field[self.pos])
self.pos += 1
elif self.field[self.pos] == "(":
self.commentlist.append(self.getcomment())
else:
break
return EMPTYSTRING.join(wslist)
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(("", ""))
return result
    def getaddress(self):
        """Parse the next address.

        Returns a list of (display-name, addrspec) 2-tuples (possibly
        empty), consuming one address -- or a whole group -- from the field.
        """
        self.commentlist = []
        self.gotonext()
        # Remember where we started in case this turns out to be a bare
        # addrspec and we need to re-scan from the beginning.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()
        self.gotonext()
        returnlist = []
        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
        elif self.field[self.pos] in ".@":
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(SPACE.join(self.commentlist), addrspec)]
        elif self.field[self.pos] == ":":
            # address is a group
            returnlist = []
            fieldlen = len(self.field)
            self.pos += 1
            # Recursively collect each member address until the ';' that
            # terminates the group.
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ";":
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()
        elif self.field[self.pos] == "<":
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()
            if self.commentlist:
                returnlist = [
                    (
                        SPACE.join(plist) + " (" + " ".join(self.commentlist) + ")",
                        routeaddr,
                    )
                ]
            else:
                returnlist = [(SPACE.join(plist), routeaddr)]
        else:
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip a stray special character so parsing can continue.
                self.pos += 1
        self.gotonext()
        # Consume a trailing comma separating this address from the next.
        if self.pos < len(self.field) and self.field[self.pos] == ",":
            self.pos += 1
        return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != "<":
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ""
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == ">":
self.pos += 1
break
elif self.field[self.pos] == "@":
self.pos += | |
#!/usr/bin/env python3
'''
USFM to HTML is a script designed to make it easier to print Scripture from USFM files.
The suite of tags supported is currently limited because the tool is developed by Wycliffe
Associates, and the number of tags our tools use is limited.
Developed by <NAME> and released for general use.
'''
#usfm2html.py
#version 0.6
# by <NAME> -- for Tech Advance
# This script reads a USFM 2 file and outputs a "pretty"
# HTML version of the file.
# It is intended that the HTML will then be further tweaked
# in LibreOffice or something similar, but of course you're
# free to do as you please.
#
# It will not check for a HTML file before writing to it.
# I will decide later if that is a bug or a feature.
#
# At this point it will only work on a file. Later it may work
# on a folder/directory
# It also does not work with wildcards for multiple files (i.e. *.usfm)
# Change in v. 0.7
# Added a simple graphical user interface that allows the user to pick a USFM file to convert.
# Change in v. 0.6
# Changed /s5 to print a horizontal line. Also changed paragraph start/stop because of this.
# Change in v. 0.5:
# Added code to ignore the Strong's numbers, etc., in USFM 3 files
# Change in v. 0.4:
# Removed <!--NewPage--> comment because it caused problems in LibreOffice, and didn't seem to work as intended.
# Changes in v. 0.3:
# Correctly deals with UTF-8 files that otherwise may not display correctly.
# Changes in v. 0.2:
# Adding/changing code to deal with Bahasa USFM files
# Usage: python3 usfm2html.py <path to USFM file>
# Usage with GUI: python3 usfm2html.py
# The output of this command can be run through html2ps and then ps2pdf in order to produce pdf output.
# Please note that any errors in the USFM will likely produce formatting errors in the HTML, and thus in the PDF.
#Import necessary python components
import numpy as np
import pandas as pd
import os # file system commands
import os.path # commands to handle paths
import re # regular expressions
import sys # command line arguments
import shutil # high level file operations
from MainGUI import *
def convert_to_html(convert_file):
"""
Convert USFM text (read from a file) to HTML. Use styles so that we can later
touch this up in LibreOffice without too much trouble.
We include a link to a default stylesheet: bible_styles.css, so that we can
easily override the internal styles by dropping a .css file in to the same
directory as our html.
Parameters
----------
This should be fed one USFM file at a time.
This function writes to a file.
"""
# Variable initialization
localized.book_name="Unknown" # Script was failing with 'book_name unknown' errors. This initializes the variable before it can be needed.
localized.chapter = "Chapter"
footnoteNum = 1
if not re.search(r'.u?sfm',convert_file, flags=re.IGNORECASE):
print("Not a USFM file as far as I can tell.")
sys.exit(1)
# Extract the file name from the path
file_name = os.path.basename(convert_file)
# Generate target_file name
target_file=re.sub(r'.u?sfm','.html', file_name, flags=re.IGNORECASE)
# Add target_file to the same path as convert_file
target_file = os.path.dirname(convert_file) + os.sep + target_file
print("Converting "+convert_file+" to "+target_file+"\n")
with open(target_file, "w+") as newfile:
newfile.write('\n'.join([
'<!DOCTYPE html>',
'<html >',
' <head>',
' <meta charset="UTF-8">',
' <title></title>',
' <style type="text/css">',
' .verse_number {color:darkgrey}',
' h1, h2, h3, .section {text-align: center}',
' .footnote {color:grey;}',
' .chapter {page-break-before: always}',
' .chapitre {font-style: italic}',
' .book_title {text-decoration:underline}',
' </style>',
' <link href="bible_styles.css" rel="stylesheet">',
' </head>',
' <body>']))
with open(convert_file) as input_file: #open the usfm file and scan it
# The tags we can expect in a USFM file are:
# \id : Book ID code
# \ide : Character encoding specification. If this is not UTF-8, we're probably in trouble.
# \h : Running header text. *Deprecated* use of numbered variable syntax.
# Formerly \h# text was used for different lines of the running header
# \toc1: Long Table of Contents text. At this point we don't use a different name in the ToC
# \toc2: Similar. Seldom any different than \toc1, although it could be shorter (if,
# for example, \toc1 used "The Book of ..." in its title)
# \toc3: Very unlikely for us to need this, as it's a very short form of the book name.
# \mt : Major title. This is probably what we'll use for the book title, unless we use \h
# This can be combined with numbers to make a grand, multi-line title
# \s5 : Section heading #5. Actually, this is used for chunk division. As such, in
# English it can make a decent paragraph break. Unfortunately, in other languages,
# this just goes where it was in the source. The other \s# tags are used for section
# headings.
# \c : Followed by a digit, this marks a chapter.
# \cl : Followed by a word, becomes the word for "chapter". Should be used *after* the
# chapter number (\c #) unless it changes the whole book (i.e. \cl Psalm) in which
# case it should go before the first chapter mark (\c 1).
# \v : Verse marker. There is another verse tag, \vp ... \vp* that indicates a non-Western
# number for the verse number.
# \p : Paragraph marker. Like <p> in HTML
# \f : Beginning of a footnote. This is more complicated than the tags we've seen so far.
# Footnotes are supposed to be bracketed by /f and /f*. However, there is also supposed
# to be a + indicating that "the caller should be generated automatically by the editor
# or publishing tools". The alternative is a -, which "indicates that no caller should
# be generated". So, I think that means "/f -" indicates a footnote for the translator/
# editor's use, only, and not for others to view. \fq is used to mark a quotation in the
# footnote. In general, this is so the quotation can be italicized, or emphasized in some
# way. An additional mark, \fqa, can be used for marking an alternate text, which is
# generally displayed in the same form as a quotation, but some software might want to know
# that it's a different sort of thing. We do sometimes use \ft (usually as \f + \ft) to show the
# beginning of the actual note text.
#
# We don't use the \fr (footnote reference), \fk (footnote refers to this key word), \fl (footnote label
# text), \fw (footnote witness list), \fp (paragraph break in the footnote), \fv ... \fv* (mark for a verse
# number in the footnote (so that it can be superscripted))
# \q : Quotation. \q1, \q2, \q3, etc., indicate increasing indentation.
# \m : Continuation (margin) paragraph. This is where a paragraph continues after a quote, and shouldn't be indented.
for line in input_file:
line=line.rstrip()
# strip out tags we don't want/need
if re.match(r'\\toc2',line):
line=None
elif re.match(r'\\toc3',line):
line=None
elif re.match(r'\\id(e?)',line):
line=None
elif re.match(r'\\rem',line):
line=None
elif re.match(r'\\b',line):
line=None
elif re.match(r'\\mt',line):
line=None
#strip out tags we don't want/need
#elif re.match(r'^\\',line):
else:
book=re.match(r'\\h (.+)',line) # we use the \h tag for the book name
if book:
book_name=book.group(1)
#if '\\' not in line:
# line=re.sub(r'(.+)','<h3 class="chapitre">\\1</h3>',line) # print lines that have no tags as just the content of the line
line=re.sub(r'\\v (\d+?) ','<span class="verse_number"><sup>\\1 </sup></span>',line) # change verse marking
line=re.sub(r'\\toc1 (.+)','<h2 class="fancy_name">\\1</h2>',line) # set fancy name of book
line=re.sub(r'\\h (.+)','<h1 class="book_name">\\1</h1>',line) # set the book name
line=re.sub(r'\\c (\d+)','<pagebreak/><?page-break><h3 class="chapter">'+book_name+' Chapter \\1</h3>',line) # change chapter marking
line=re.sub(r'\\s (.+)','<h4 class="section">\\1</h4>',line) # section headings from .sfm files
line=re.sub(r'\\q1','<p class="quote1">\\1</p>',line) # quote level 1
line=re.sub(r'\\q2','<p class="quote2">\\1</p>',line) # quote level 2
line=re.sub(r'\\q','<p class="quote1">\\1</p>',line) # quote without # is Quote 1
line=re.sub(r'\\p|\\m','</p><p>',line) # close paragraph marks
# line=re.sub(r'\\s5','<hr>',line) # We had a request to print 'by chunk'. This line can be uncommented for that, or we can implement a pref
line=re.sub(r'\\f\*','</span>',line) #close footnotes
| |
<filename>pandasreg/rperiod.py<gh_stars>1-10
import numpy as np
from datetime import datetime, date
import dateutil.parser
from pandas.tseries.index import Int64Index, Index
import pandas as pd
import pandas.core.common as com
import pandas._algos as _algos
from pandas.tseries.tools import parse_time_string
from rfreq import RFrequency
class RPeriod(object):
    """
    Represents a regularly spaced period, which can be incremented,
    decremented, and compared to other periods of the same frequency.
    """

    def __init__(self, value=None, freq=None, ordinal=None):
        """
        Arguments:
            value (RPeriod, datetime, Timestamp, date, str): a date to be
                converted to a period at the given frequency. Either the
                value or ordinal must be supplied, but not both.
            freq (str, RFrequency): frequency of the period
            ordinal (int): an ordinal period at the given frequency. Either
                the value or ordinal must be supplied, but not both.

        Raises:
            ValueError: if both value and ordinal are given, if freq is
                missing where required, or if value has an unsupported type.
        """
        if freq is not None:
            if isinstance(freq, basestring):
                self.freq = RFrequency.init(freq)
            elif isinstance(freq, RFrequency):
                self.freq = freq
            else:
                raise ValueError("Frequency must be a string or frequency class")
        if ordinal is not None and value is not None:
            raise ValueError("Only value or ordinal but not both should be given")
        elif ordinal is not None:
            if not com.is_integer(ordinal):
                raise ValueError("Ordinal must be an integer")
            if freq is None:
                raise ValueError("Must supply freq for ordinal value")
            self.ordinal = ordinal
        elif isinstance(value, RPeriod):
            if freq is None or value.freq == self.freq:
                self.ordinal = value.ordinal
                self.freq = value.freq
            else:
                # BUG FIX: asfreq() returns an RPeriod, not an integer.
                # The old code stored the period object itself in
                # self.ordinal; store the converted period's ordinal instead.
                self.ordinal = value.asfreq(self.freq).ordinal
        elif isinstance(value, (datetime, pd.Timestamp)):
            if freq is None:
                raise ValueError("Must supply freq for datetime/Timestamp value")
            self.ordinal = self.freq.to_ordinal(value)
        elif isinstance(value, date):
            if freq is None:
                raise ValueError("Must supply freq for datetime value")
            # Promote a plain date to a midnight datetime before converting.
            self.ordinal = self.freq.to_ordinal(datetime(value.year, value.month, value.day))
        elif isinstance(value, basestring):
            self.ordinal = _string_to_period(value).asfreq(freq).ordinal
        else:
            raise ValueError("Value must be RPeriod, integer, datetime/date, or valid string")

    # Comparisons are only defined between periods of the same frequency;
    # anything else raises ValueError (by design, not TypeError).
    def __lt__(self, other):
        if isinstance(other, RPeriod):
            if self.freq != other.freq:
                raise ValueError("Can only compare periods with same frequency")
            return self.ordinal < other.ordinal
        raise ValueError("Can only compare to another RPeriod")

    def __le__(self, other):
        return self < other or self == other

    def __eq__(self, other):
        if isinstance(other, RPeriod):
            return self.ordinal == other.ordinal and self.freq == other.freq
        raise ValueError("Can only compare to another RPeriod")

    def __ne__(self, other):
        return not self == other

    def __ge__(self, other):
        return self > other or self == other

    def __gt__(self, other):
        if isinstance(other, RPeriod):
            if self.freq != other.freq:
                raise ValueError("Can only compare periods with same frequency")
            return self.ordinal > other.ordinal
        raise ValueError("Can only compare to another RPeriod")

    def __hash__(self):
        return hash((self.ordinal, self.freq))

    def __add__(self, other):
        # period + n shifts forward by n periods; only integers supported.
        if com.is_integer(other):
            return RPeriod(ordinal=self.ordinal + other, freq=self.freq)
        else:
            raise TypeError(other)

    def __sub__(self, other):
        # period - n shifts backwards; period - period gives the integer gap.
        if com.is_integer(other):
            return RPeriod(ordinal=self.ordinal - other, freq=self.freq)
        if isinstance(other, RPeriod):
            if other.freq != self.freq:
                raise ValueError("Cannot do arithmetic with non-conforming periods")
            return self.ordinal - other.ordinal
        else:
            raise TypeError(other)

    def asfreq(self, freq, how='E', overlap=True):
        """Convert this period to another frequency; returns a new RPeriod."""
        return RPeriod(ordinal=self.freq.asfreq(self.ordinal, freq, how,
                       overlap), freq=freq)

    def to_timestamp(self):
        # TODO: support freq, how option
        return self.freq.to_timestamp(self.ordinal)

    def to_datetime(self, freq=None):
        return self.to_timestamp().to_pydatetime()

    def __repr__(self):
        # BUG FIX: __repr__ must return a str.  The old code returned the
        # Timestamp object itself, which makes repr()/interactive display
        # raise TypeError.
        return str(self.freq.to_timestamp(self.ordinal))

    def __str__(self):
        dt = self.freq.to_timestamp(self.ordinal)
        return self.freq.format(dt)

    def strftime(self, fmt):
        return self.freq.to_timestamp(self.ordinal).strftime(fmt)
class RPeriodIndex(Int64Index):
"""
This class is based on pandas' PeriodIndex and the initalization
arguments are almost the same. The one additional argument is `observed`.
Arguments:
data: a list of datetimes, Timestamps, or datetime strings
ordinal: a list of ordinal periods that can be provided instead of the
data argument.
freq (str, RFrequency): frequency of the index
start: starting period
end: ending period
periods: # of periods
name: a name for the index
observed: this option controls how a Series or DataFrame will be
resampled if the user does not provide an explicit method. Options can
be any of those that are provided to the pandas resample function.
"""
    def __new__(cls, data=None, ordinal=None,
                freq=None, start=None, end=None, periods=None,
                name=None, observed=None):
        # Resolve the frequency: an explicit argument wins, otherwise it is
        # inferred from an RPeriod start/end bound.
        if freq is None:
            if start is not None and isinstance(start, RPeriod):
                freq = start.freq
            elif end is not None and isinstance(end, RPeriod):
                freq = end.freq
            else:
                raise ValueError("Must supply frequency")
        if isinstance(freq, basestring):
            freq = RFrequency.init(freq)
        if data is None:
            if ordinal is not None:
                data = np.asarray(ordinal, dtype=np.int64)
            else:
                # Build the ordinal range from start/end/periods bounds.
                data = cls._get_ordinal_range(start, end, periods, freq)
        else:
            ordinal = cls._from_arraylike(data, freq)
            data = np.array(ordinal, dtype=np.int64, copy=False)
        # Default resampling aggregation when the caller does not give one.
        if observed is None:
            observed = "mean"
        # ndarray subclassing: view the int64 ordinals as this class and
        # attach the index metadata.
        subarr = data.view(cls)
        subarr.name = name
        subarr.freq = freq
        subarr.observed = observed
        return subarr
    @classmethod
    def _get_ordinal_range(cls, start, end, periods, freq):
        """Build the int64 ordinal array from start/end/periods bounds."""
        # Normalize each bound to an integer ordinal at `freq`.
        if isinstance(start, datetime):
            start = freq.to_ordinal(start)
        elif isinstance(start, RPeriod):
            start = start.ordinal
        elif isinstance(start, basestring):
            start = _string_to_period(start).asfreq(freq).ordinal
        if isinstance(end, datetime):
            end = freq.to_ordinal(end)
        elif isinstance(end, RPeriod):
            end = end.ordinal
        elif isinstance(end, basestring):
            end = _string_to_period(end).asfreq(freq).ordinal
        if periods is not None:
            if start is None:
                # Anchor on `end` and count backwards.
                data = np.arange(end - periods + 1, end + 1, dtype=np.int64)
            else:
                data = np.arange(start, start + periods, dtype=np.int64)
        else:
            # Inclusive of the end period.
            data = np.arange(start, end+1, dtype=np.int64)
        return data
    @classmethod
    def _from_arraylike(cls, data, freq):
        """Convert date-like input to int64 ordinals at the target freq."""
        if not isinstance(data, np.ndarray):
            # Assumes each element has year/month/day attributes
            # (date/datetime-like).
            data = [freq.to_ordinal(datetime(x.year, x.month, x.day)) for x in data]
        else:
            if isinstance(data, RPeriodIndex):
                if freq == data.freq:
                    data = data.values
                else:
                    # NOTE(review): a frequency mismatch is silently ignored
                    # here -- presumably converted elsewhere; confirm.
                    pass
            else:
                # Plain ndarray: assumed to already hold ordinals.
                pass
        return data
    def asfreq(self, freq, how='E', overlap=True):
        """Convert the periods in the index to another frequency.
        See the RFrequency.asfreq() documentation for more information.
        """
        if isinstance(freq, basestring):
            freq = RFrequency.init(freq)
        # No-op when the target frequency matches the current one.
        if freq.freqstr == self.freq.freqstr:
            return self
        return type(self)(ordinal=self.freq.np_asfreq(self.values, freq, how,
                          overlap), freq=freq)
    @property
    def freqstr(self):
        """String representation of the index's frequency."""
        return self.freq.freqstr
    def __contains__(self, key):
        """Membership test; accepts RPeriods and (via get_loc) strings."""
        if not isinstance(key, RPeriod) or key.freq != self.freq:
            if isinstance(key, basestring):
                # Strings are resolved through get_loc's date parsing.
                try:
                    self.get_loc(key)
                    return True
                except Exception:
                    return False
            return False
        # Fast path: matching-frequency RPeriod, looked up by ordinal.
        return key.ordinal in self._engine
    @property
    def is_full(self):
        """
        Returns True if there are no missing periods from start to end
        (duplicates are allowed; the index must be monotonic).
        """
        if len(self) == 0:
            return True
        if not self.is_monotonic:
            raise ValueError('Index is not monotonic')
        values = self.values
        # Consecutive ordinals may differ by at most 1 (0 == duplicate).
        return ((values[1:] - values[:-1]) < 2).all()
    def __array_finalize__(self, obj):
        # ndarray subclass hook: propagate metadata onto views/slices.
        if self.ndim == 0:  # pragma: no cover
            # A 0-d result degrades to a plain scalar.
            return self.item()
        self.freq = getattr(obj, 'freq', None)
        self.observed = getattr(obj, 'observed', None)
def map(self, f):
try:
return f(self)
except:
values = self._get_object_array()
return _algos.arrmap_object(values, f)
    def shift(self, n):
        """Shift every period in the index by n; returns self when n == 0."""
        if n == 0:
            return self
        # NOTE(review): passes the shifted ordinals via `data=`, unlike
        # __add__ which uses `ordinal=` -- confirm both paths are equivalent.
        return RPeriodIndex(data=self.values + n, freq=self.freq,
                            observed=self.observed)
    def __add__(self, other):
        # Shift every period forward by `other` (an integer offset).
        return RPeriodIndex(ordinal=self.values + other, freq=self.freq,
                            observed=self.observed)
    def __sub__(self, other):
        # Shift every period backward by `other` (an integer offset).
        return RPeriodIndex(ordinal=self.values - other, freq=self.freq,
                            observed=self.observed)
    def __getitem__(self, key):
        """Scalar access boxes to RPeriod; slicing returns an RPeriodIndex."""
        arr_idx = self.view(np.ndarray)
        if np.isscalar(key):
            val = arr_idx[key]
            return RPeriod(ordinal=val, freq=self.freq)
        else:
            if com._is_bool_indexer(key):
                key = np.asarray(key)
            result = arr_idx[key]
            if result.ndim > 1:
                # MPL kludge
                # values = np.asarray(list(values), dtype=object)
                # return values.reshape(result.shape)
                return RPeriodIndex(result, name=self.name, freq=self.freq,
                                    observed=self.observed)
            return RPeriodIndex(result, name=self.name, freq=self.freq,
                                observed=self.observed)
    def join(self, other, how='left', level=None, return_indexers=False):
        """Join with another RPeriodIndex, preserving freq/observed metadata."""
        self._assert_can_do_setop(other)
        result = Int64Index.join(self, other, how=how, level=level,
                                 return_indexers=return_indexers)
        if return_indexers:
            # Int64Index.join returned (index, left_indexer, right_indexer).
            result, lidx, ridx = result
            return self._apply_meta(result), lidx, ridx
        else:
            return self._apply_meta(result)
    def _assert_can_do_setop(self, other):
        """Validate that a set operation with `other` is well-defined."""
        if not isinstance(other, RPeriodIndex):
            raise ValueError('can only call with other RPeriodIndex-ed objects')
        if self.freq != other.freq:
            raise ValueError('Only like-indexed RPeriodIndexes compatible for join')
    def _wrap_union_result(self, other, result):
        # Preserve the index name only when both operands agree on it.
        name = self.name if self.name == other.name else None
        result = self._apply_meta(result)
        result.name = name
        return result
def _apply_meta(self, rawarr):
idx = rawarr.view(RPeriodIndex)
idx.freq = self.freq
idx.observed = self.observed;
return idx
    def __iter__(self):
        # Yields the raw int64 ordinals, not boxed RPeriod objects
        # (boxing was deliberately disabled; see the commented-out line).
        for val in self.values:
            # yield RPeriod(ordinal=val, freq=self.freq)
            yield val
    @property
    def inferred_type(self):
        """Index type tag consumed by pandas' indexing machinery."""
        # b/c data is represented as ints make sure we can't have ambiguous
        # indexing
        return 'period'
    def get_value(self, series, key):
        """Look up `key` in `series`, with string/partial-period fallbacks."""
        try:
            # Fast path: the superclass engine lookup.
            return super(RPeriodIndex, self).get_value(series, key)
        except (KeyError, IndexError):
            try:
                period = _string_to_period(key)
                vals = self.values
                # if our data is higher resolution than requested key, slice
                if period.freq < self.freq:
                    ord1 = period.asfreq(self.freq, how='S').ordinal
                    ord2 = period.asfreq(self.freq, how='E').ordinal
                    if ord2 < vals[0] or ord1 > vals[-1]:
                        raise KeyError(key)
                    # Slice the span covered by the coarser key.
                    pos = np.searchsorted(self.values, [ord1, ord2])
                    key = slice(pos[0], pos[1] + 1)
                    return series[key]
                else:
                    key = period.asfreq(self.freq)
                    return self._engine.get_value(series, key.ordinal)
            except TypeError:
                pass
            except KeyError:
                pass
            # Last resort: interpret the key as a period at our frequency.
            key = RPeriod(key, self.freq)
            return self._engine.get_value(series, key.ordinal)
    def get_loc(self, key):
        """Return the integer location of `key` in the index."""
        try:
            return self._engine.get_loc(key)
        except KeyError:
            # Integers are treated as positional locations.
            if com.is_integer(key):
                return key
            try:
                key = _string_to_period(key)
            except TypeError:
                pass
            # Convert to an ordinal at our frequency and retry.
            key = RPeriod(key, self.freq).ordinal
            return self._engine.get_loc(key)
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, basestring) or isinstance(end, basestring):
try:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.